1 | /* |
2 | * Copyright (C) 2007, 2008, 2012-2015 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * |
8 | * 1. Redistributions of source code must retain the above copyright |
9 | * notice, this list of conditions and the following disclaimer. |
10 | * 2. Redistributions in binary form must reproduce the above copyright |
11 | * notice, this list of conditions and the following disclaimer in the |
12 | * documentation and/or other materials provided with the distribution. |
13 | * 3. Neither the name of Apple Inc. ("Apple") nor the names of |
14 | * its contributors may be used to endorse or promote products derived |
15 | * from this software without specific prior written permission. |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY |
18 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
19 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
20 | * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY |
21 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
22 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
23 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
24 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
28 | |
29 | #pragma once |
30 | |
31 | #include "ConcurrentJSLock.h" |
32 | #include "ConstantMode.h" |
33 | #include "InferredValue.h" |
34 | #include "JSObject.h" |
35 | #include "ScopedArgumentsTable.h" |
36 | #include "TypeLocation.h" |
37 | #include "VarOffset.h" |
38 | #include "Watchpoint.h" |
39 | #include <memory> |
40 | #include <wtf/HashTraits.h> |
41 | #include <wtf/text/UniquedStringImpl.h> |
42 | |
43 | namespace JSC { |
44 | |
45 | class SymbolTable; |
46 | |
47 | static ALWAYS_INLINE int missingSymbolMarker() { return std::numeric_limits<int>::max(); } |
48 | |
// The bit twiddling in this class assumes that every register index is a
// reasonably small positive or negative number, and therefore has its high
// bits all set or all unset, so that it survives the left-shift by FlagBits
// in pack() and the arithmetic right-shift back in varOffsetFromBits().
52 | |
53 | // In addition to implementing semantics-mandated variable attributes and |
54 | // implementation-mandated variable indexing, this class also implements |
55 | // watchpoints to be used for JIT optimizations. Because watchpoints are |
56 | // meant to be relatively rare, this class optimizes heavily for the case |
57 | // that they are not being used. To that end, this class uses the thin-fat |
58 | // idiom: either it is thin, in which case it contains an in-place encoded |
59 | // word that consists of attributes, the index, and a bit saying that it is |
60 | // thin; or it is fat, in which case it contains a pointer to a malloc'd |
61 | // data structure and a bit saying that it is fat. The malloc'd data |
// structure will be malloc'd a second time upon copy, to preserve the
63 | // property that in-place edits to SymbolTableEntry do not manifest in any |
64 | // copies. However, the malloc'd FatEntry data structure contains a ref- |
65 | // counted pointer to a shared WatchpointSet. Thus, in-place edits of the |
66 | // WatchpointSet will manifest in all copies. Here's a picture: |
67 | // |
68 | // SymbolTableEntry --> FatEntry --> WatchpointSet |
69 | // |
70 | // If you make a copy of a SymbolTableEntry, you will have: |
71 | // |
72 | // original: SymbolTableEntry --> FatEntry --> WatchpointSet |
73 | // copy: SymbolTableEntry --> FatEntry -----^ |
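//
// For example (illustrative, not part of this header):
//
//     SymbolTableEntry original(VarOffset(ScopeOffset(0)));
//     original.prepareToWatch();        // may inflate `original` to a FatEntry
//     SymbolTableEntry copy = original; // copySlow(): fresh FatEntry, shared WatchpointSet
//     ASSERT(copy.watchpointSet() == original.watchpointSet());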
74 | |
75 | struct SymbolTableEntry { |
76 | friend class CachedSymbolTableEntry; |
77 | |
78 | private: |
79 | static VarOffset varOffsetFromBits(intptr_t bits) |
80 | { |
81 | VarKind kind; |
82 | intptr_t kindBits = bits & KindBitsMask; |
83 | if (kindBits <= UnwatchableScopeKindBits) |
84 | kind = VarKind::Scope; |
85 | else if (kindBits == StackKindBits) |
86 | kind = VarKind::Stack; |
87 | else |
88 | kind = VarKind::DirectArgument; |
89 | return VarOffset::assemble(kind, static_cast<int>(bits >> FlagBits)); |
90 | } |
91 | |
92 | static ScopeOffset scopeOffsetFromBits(intptr_t bits) |
93 | { |
94 | ASSERT((bits & KindBitsMask) <= UnwatchableScopeKindBits); |
95 | return ScopeOffset(static_cast<int>(bits >> FlagBits)); |
96 | } |
97 | |
98 | public: |
99 | |
    // Use the SymbolTableEntry::Fast class, either via implicit cast or by calling
    // getFast(), when you (1) only care about isNull(), varOffset(), and isReadOnly(),
    // and (2) you are in a hot path where you need to minimize the number of times
    // that you branch on isFat() when getting the bits().
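    //
    // For example (illustrative; `storeToOffset` is a hypothetical helper):
    //
    //     SymbolTableEntry::Fast fast = entry.getFast();
    //     if (!fast.isNull() && !fast.isReadOnly())
    //         storeToOffset(fast.varOffset(), value);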
104 | class Fast { |
105 | public: |
106 | Fast() |
107 | : m_bits(SlimFlag) |
108 | { |
109 | } |
110 | |
111 | ALWAYS_INLINE Fast(const SymbolTableEntry& entry) |
112 | : m_bits(entry.bits()) |
113 | { |
114 | } |
115 | |
116 | bool isNull() const |
117 | { |
118 | return !(m_bits & ~SlimFlag); |
119 | } |
120 | |
121 | VarOffset varOffset() const |
122 | { |
123 | return varOffsetFromBits(m_bits); |
124 | } |
125 | |
126 | // Asserts if the offset is anything but a scope offset. This structures the assertions |
127 | // in a way that may result in better code, even in release, than doing |
128 | // varOffset().scopeOffset(). |
129 | ScopeOffset scopeOffset() const |
130 | { |
131 | return scopeOffsetFromBits(m_bits); |
132 | } |
133 | |
134 | bool isReadOnly() const |
135 | { |
136 | return m_bits & ReadOnlyFlag; |
137 | } |
138 | |
139 | bool isDontEnum() const |
140 | { |
141 | return m_bits & DontEnumFlag; |
142 | } |
143 | |
144 | unsigned getAttributes() const |
145 | { |
146 | unsigned attributes = 0; |
147 | if (isReadOnly()) |
148 | attributes |= PropertyAttribute::ReadOnly; |
149 | if (isDontEnum()) |
150 | attributes |= PropertyAttribute::DontEnum; |
151 | return attributes; |
152 | } |
153 | |
154 | bool isFat() const |
155 | { |
156 | return !(m_bits & SlimFlag); |
157 | } |
158 | |
159 | private: |
160 | friend struct SymbolTableEntry; |
161 | intptr_t m_bits; |
162 | }; |
163 | |
164 | SymbolTableEntry() |
165 | : m_bits(SlimFlag) |
166 | { |
167 | } |
168 | |
169 | SymbolTableEntry(VarOffset offset) |
170 | : m_bits(SlimFlag) |
171 | { |
172 | ASSERT(isValidVarOffset(offset)); |
173 | pack(offset, true, false, false); |
174 | } |
175 | |
176 | SymbolTableEntry(VarOffset offset, unsigned attributes) |
177 | : m_bits(SlimFlag) |
178 | { |
179 | ASSERT(isValidVarOffset(offset)); |
180 | pack(offset, true, attributes & PropertyAttribute::ReadOnly, attributes & PropertyAttribute::DontEnum); |
181 | } |
182 | |
183 | ~SymbolTableEntry() |
184 | { |
185 | freeFatEntry(); |
186 | } |
187 | |
188 | SymbolTableEntry(const SymbolTableEntry& other) |
189 | : m_bits(SlimFlag) |
190 | { |
191 | *this = other; |
192 | } |
193 | |
194 | SymbolTableEntry& operator=(const SymbolTableEntry& other) |
195 | { |
196 | if (UNLIKELY(other.isFat())) |
197 | return copySlow(other); |
198 | freeFatEntry(); |
199 | m_bits = other.m_bits; |
200 | return *this; |
201 | } |
202 | |
203 | SymbolTableEntry(SymbolTableEntry&& other) |
204 | : m_bits(SlimFlag) |
205 | { |
206 | swap(other); |
207 | } |
208 | |
209 | SymbolTableEntry& operator=(SymbolTableEntry&& other) |
210 | { |
211 | swap(other); |
212 | return *this; |
213 | } |
214 | |
215 | void swap(SymbolTableEntry& other) |
216 | { |
217 | std::swap(m_bits, other.m_bits); |
218 | } |
219 | |
220 | bool isNull() const |
221 | { |
222 | return !(bits() & ~SlimFlag); |
223 | } |
224 | |
225 | VarOffset varOffset() const |
226 | { |
227 | return varOffsetFromBits(bits()); |
228 | } |
229 | |
    bool isWatchable() const
    {
        // Use bits() rather than m_bits so that fat entries, where m_bits holds
        // a FatEntry pointer, decode their kind bits correctly.
        return (bits() & KindBitsMask) == ScopeKindBits && VM::canUseJIT();
    }
234 | |
235 | // Asserts if the offset is anything but a scope offset. This structures the assertions |
236 | // in a way that may result in better code, even in release, than doing |
237 | // varOffset().scopeOffset(). |
238 | ScopeOffset scopeOffset() const |
239 | { |
240 | return scopeOffsetFromBits(bits()); |
241 | } |
242 | |
243 | ALWAYS_INLINE Fast getFast() const |
244 | { |
245 | return Fast(*this); |
246 | } |
247 | |
248 | ALWAYS_INLINE Fast getFast(bool& wasFat) const |
249 | { |
250 | Fast result; |
251 | wasFat = isFat(); |
252 | if (wasFat) |
253 | result.m_bits = fatEntry()->m_bits | SlimFlag; |
254 | else |
255 | result.m_bits = m_bits; |
256 | return result; |
257 | } |
258 | |
259 | unsigned getAttributes() const |
260 | { |
261 | return getFast().getAttributes(); |
262 | } |
263 | |
264 | void setAttributes(unsigned attributes) |
265 | { |
266 | pack(varOffset(), isWatchable(), attributes & PropertyAttribute::ReadOnly, attributes & PropertyAttribute::DontEnum); |
267 | } |
268 | |
269 | bool isReadOnly() const |
270 | { |
271 | return bits() & ReadOnlyFlag; |
272 | } |
273 | |
274 | ConstantMode constantMode() const |
275 | { |
276 | return modeForIsConstant(isReadOnly()); |
277 | } |
278 | |
279 | bool isDontEnum() const |
280 | { |
281 | return bits() & DontEnumFlag; |
282 | } |
283 | |
284 | void disableWatching(VM& vm) |
285 | { |
286 | if (WatchpointSet* set = watchpointSet()) |
            set->invalidate(vm, "Disabling watching in symbol table");
288 | if (varOffset().isScope()) |
289 | pack(varOffset(), false, isReadOnly(), isDontEnum()); |
290 | } |
291 | |
    // Creates this entry's WatchpointSet (inflating to a fat entry) if the
    // entry is watchable and does not already have one.
    void prepareToWatch();
293 | |
294 | // This watchpoint set is initialized clear, and goes through the following state transitions: |
295 | // |
296 | // First write to this var, in any scope that has this symbol table: Clear->IsWatched. |
297 | // |
298 | // Second write to this var, in any scope that has this symbol table: IsWatched->IsInvalidated. |
299 | // |
300 | // We ensure that we touch the set (i.e. trigger its state transition) after we do the write. This |
301 | // means that if you're in the compiler thread, and you: |
302 | // |
303 | // 1) Observe that the set IsWatched and commit to adding your watchpoint. |
304 | // 2) Load a value from any scope that has this watchpoint set. |
305 | // |
    // Then you can be sure that the value you loaded is either the correct value for that var
    // forever, or the watchpoint set will be invalidated and your watchpoint will fire.
308 | // |
309 | // It's possible to write a program that first creates multiple scopes with the same var, and then |
310 | // initializes that var in just one of them. This means that a compilation could constant-fold to one |
311 | // of the scopes that still has an undefined value for this variable. That's fine, because at that |
312 | // point any write to any of the instances of that variable would fire the watchpoint. |
313 | // |
314 | // Note that watchpointSet() returns nullptr if JIT is disabled. |
315 | WatchpointSet* watchpointSet() |
316 | { |
317 | if (!isFat()) |
318 | return nullptr; |
319 | return fatEntry()->m_watchpoints.get(); |
320 | } |
321 | |
322 | private: |
323 | static const intptr_t SlimFlag = 0x1; |
324 | static const intptr_t ReadOnlyFlag = 0x2; |
325 | static const intptr_t DontEnumFlag = 0x4; |
326 | static const intptr_t NotNullFlag = 0x8; |
327 | static const intptr_t KindBitsMask = 0x30; |
328 | static const intptr_t ScopeKindBits = 0x00; |
329 | static const intptr_t UnwatchableScopeKindBits = 0x10; |
330 | static const intptr_t StackKindBits = 0x20; |
331 | static const intptr_t DirectArgumentKindBits = 0x30; |
332 | static const intptr_t FlagBits = 6; |
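
    // Layout of a slim entry's m_bits (derived from the constants above):
    //
    //     [ signed VarOffset raw offset ][ kind ][N][D][R][S]
    //       bits 6 and up                 5..4    3  2  1  0
    //
    //     S = SlimFlag, R = ReadOnlyFlag, D = DontEnumFlag, N = NotNullFlag;
    //     the kind field holds one of the *KindBits values under KindBitsMask.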
333 | |
334 | class FatEntry { |
335 | WTF_MAKE_FAST_ALLOCATED; |
336 | public: |
337 | FatEntry(intptr_t bits) |
338 | : m_bits(bits & ~SlimFlag) |
339 | { |
340 | } |
341 | |
        intptr_t m_bits; // never has SlimFlag set (the constructor clears it); otherwise exactly matches what the bits would have been if this entry weren't fat.
343 | |
344 | RefPtr<WatchpointSet> m_watchpoints; |
345 | }; |
346 | |
347 | SymbolTableEntry& copySlow(const SymbolTableEntry&); |
348 | JS_EXPORT_PRIVATE void notifyWriteSlow(VM&, JSValue, const FireDetail&); |
349 | |
350 | bool isFat() const |
351 | { |
352 | return !(m_bits & SlimFlag); |
353 | } |
354 | |
355 | const FatEntry* fatEntry() const |
356 | { |
357 | ASSERT(isFat()); |
358 | return bitwise_cast<const FatEntry*>(m_bits); |
359 | } |
360 | |
361 | FatEntry* fatEntry() |
362 | { |
363 | ASSERT(isFat()); |
364 | return bitwise_cast<FatEntry*>(m_bits); |
365 | } |
366 | |
367 | FatEntry* inflate() |
368 | { |
369 | if (LIKELY(isFat())) |
370 | return fatEntry(); |
371 | return inflateSlow(); |
372 | } |
373 | |
374 | FatEntry* inflateSlow(); |
375 | |
376 | ALWAYS_INLINE intptr_t bits() const |
377 | { |
378 | if (isFat()) |
379 | return fatEntry()->m_bits; |
380 | return m_bits; |
381 | } |
382 | |
383 | ALWAYS_INLINE intptr_t& bits() |
384 | { |
385 | if (isFat()) |
386 | return fatEntry()->m_bits; |
387 | return m_bits; |
388 | } |
389 | |
390 | void freeFatEntry() |
391 | { |
392 | if (LIKELY(!isFat())) |
393 | return; |
394 | freeFatEntrySlow(); |
395 | } |
396 | |
397 | JS_EXPORT_PRIVATE void freeFatEntrySlow(); |
398 | |
399 | void pack(VarOffset offset, bool isWatchable, bool readOnly, bool dontEnum) |
400 | { |
401 | ASSERT(!isFat()); |
402 | intptr_t& bitsRef = bits(); |
403 | bitsRef = |
404 | (static_cast<intptr_t>(offset.rawOffset()) << FlagBits) | NotNullFlag | SlimFlag; |
405 | if (readOnly) |
406 | bitsRef |= ReadOnlyFlag; |
407 | if (dontEnum) |
408 | bitsRef |= DontEnumFlag; |
409 | switch (offset.kind()) { |
410 | case VarKind::Scope: |
411 | if (isWatchable) |
412 | bitsRef |= ScopeKindBits; |
413 | else |
414 | bitsRef |= UnwatchableScopeKindBits; |
415 | break; |
416 | case VarKind::Stack: |
417 | bitsRef |= StackKindBits; |
418 | break; |
419 | case VarKind::DirectArgument: |
420 | bitsRef |= DirectArgumentKindBits; |
421 | break; |
422 | default: |
423 | RELEASE_ASSERT_NOT_REACHED(); |
424 | break; |
425 | } |
426 | } |
427 | |
428 | static bool isValidVarOffset(VarOffset offset) |
429 | { |
430 | return ((static_cast<intptr_t>(offset.rawOffset()) << FlagBits) >> FlagBits) == static_cast<intptr_t>(offset.rawOffset()); |
431 | } |
432 | |
433 | intptr_t m_bits; |
434 | }; |
435 | |
// SymbolTableEntry destructors must run on hash table teardown so that fat
// entries can free their malloc'd FatEntry.
struct SymbolTableIndexHashTraits : HashTraits<SymbolTableEntry> {
    static const bool needsDestruction = true;
};
439 | |
440 | class SymbolTable final : public JSCell { |
441 | friend class CachedSymbolTable; |
442 | |
443 | public: |
444 | typedef JSCell Base; |
445 | static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; |
446 | |
447 | typedef HashMap<RefPtr<UniquedStringImpl>, SymbolTableEntry, IdentifierRepHash, HashTraits<RefPtr<UniquedStringImpl>>, SymbolTableIndexHashTraits> Map; |
448 | typedef HashMap<RefPtr<UniquedStringImpl>, GlobalVariableID, IdentifierRepHash> UniqueIDMap; |
449 | typedef HashMap<RefPtr<UniquedStringImpl>, RefPtr<TypeSet>, IdentifierRepHash> UniqueTypeSetMap; |
450 | typedef HashMap<VarOffset, RefPtr<UniquedStringImpl>> OffsetToVariableMap; |
451 | typedef Vector<SymbolTableEntry*> LocalToEntryVec; |
452 | |
453 | static SymbolTable* create(VM& vm) |
454 | { |
455 | SymbolTable* symbolTable = new (NotNull, allocateCell<SymbolTable>(vm.heap)) SymbolTable(vm); |
456 | symbolTable->finishCreation(vm); |
457 | return symbolTable; |
458 | } |
459 | |
460 | static const bool needsDestruction = true; |
461 | static void destroy(JSCell*); |
462 | |
463 | static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype) |
464 | { |
465 | return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info()); |
466 | } |
467 | |
468 | // You must hold the lock until after you're done with the iterator. |
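    //
    // For example (illustrative; `use` is a hypothetical consumer):
    //
    //     {
    //         ConcurrentJSLocker locker(symbolTable->m_lock);
    //         auto iter = symbolTable->find(locker, key);
    //         if (iter != symbolTable->end(locker))
    //             use(iter->value);
    //     } // Lock released; `iter` must not be used past this point.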
469 | Map::iterator find(const ConcurrentJSLocker&, UniquedStringImpl* key) |
470 | { |
471 | return m_map.find(key); |
472 | } |
473 | |
474 | Map::iterator find(const GCSafeConcurrentJSLocker&, UniquedStringImpl* key) |
475 | { |
476 | return m_map.find(key); |
477 | } |
478 | |
479 | SymbolTableEntry get(const ConcurrentJSLocker&, UniquedStringImpl* key) |
480 | { |
481 | return m_map.get(key); |
482 | } |
483 | |
484 | SymbolTableEntry get(UniquedStringImpl* key) |
485 | { |
486 | ConcurrentJSLocker locker(m_lock); |
487 | return get(locker, key); |
488 | } |
489 | |
490 | SymbolTableEntry inlineGet(const ConcurrentJSLocker&, UniquedStringImpl* key) |
491 | { |
492 | return m_map.inlineGet(key); |
493 | } |
494 | |
495 | SymbolTableEntry inlineGet(UniquedStringImpl* key) |
496 | { |
497 | ConcurrentJSLocker locker(m_lock); |
498 | return inlineGet(locker, key); |
499 | } |
500 | |
501 | Map::iterator begin(const ConcurrentJSLocker&) |
502 | { |
503 | return m_map.begin(); |
504 | } |
505 | |
506 | Map::iterator end(const ConcurrentJSLocker&) |
507 | { |
508 | return m_map.end(); |
509 | } |
510 | |
511 | Map::iterator end(const GCSafeConcurrentJSLocker&) |
512 | { |
513 | return m_map.end(); |
514 | } |
515 | |
516 | size_t size(const ConcurrentJSLocker&) const |
517 | { |
518 | return m_map.size(); |
519 | } |
520 | |
521 | size_t size() const |
522 | { |
523 | ConcurrentJSLocker locker(m_lock); |
524 | return size(locker); |
525 | } |
526 | |
527 | ScopeOffset maxScopeOffset() const |
528 | { |
529 | return m_maxScopeOffset; |
530 | } |
531 | |
532 | void didUseScopeOffset(ScopeOffset offset) |
533 | { |
534 | if (!m_maxScopeOffset || m_maxScopeOffset < offset) |
535 | m_maxScopeOffset = offset; |
536 | } |
537 | |
538 | void didUseVarOffset(VarOffset offset) |
539 | { |
540 | if (offset.isScope()) |
541 | didUseScopeOffset(offset.scopeOffset()); |
542 | } |
543 | |
544 | unsigned scopeSize() const |
545 | { |
546 | ScopeOffset maxScopeOffset = this->maxScopeOffset(); |
547 | |
548 | // Do some calculation that relies on invalid scope offset plus one being zero. |
549 | unsigned fastResult = maxScopeOffset.offsetUnchecked() + 1; |
550 | |
551 | // Assert that this works. |
552 | ASSERT(fastResult == (!maxScopeOffset ? 0 : maxScopeOffset.offset() + 1)); |
553 | |
554 | return fastResult; |
555 | } |
556 | |
557 | ScopeOffset nextScopeOffset() const |
558 | { |
559 | return ScopeOffset(scopeSize()); |
560 | } |
561 | |
562 | ScopeOffset takeNextScopeOffset(const ConcurrentJSLocker&) |
563 | { |
564 | ScopeOffset result = nextScopeOffset(); |
565 | m_maxScopeOffset = result; |
566 | return result; |
567 | } |
568 | |
569 | ScopeOffset takeNextScopeOffset() |
570 | { |
571 | ConcurrentJSLocker locker(m_lock); |
572 | return takeNextScopeOffset(locker); |
573 | } |
574 | |
575 | template<typename Entry> |
576 | void add(const ConcurrentJSLocker&, UniquedStringImpl* key, Entry&& entry) |
577 | { |
578 | RELEASE_ASSERT(!m_localToEntry); |
579 | didUseVarOffset(entry.varOffset()); |
580 | Map::AddResult result = m_map.add(key, std::forward<Entry>(entry)); |
581 | ASSERT_UNUSED(result, result.isNewEntry); |
582 | } |
583 | |
584 | template<typename Entry> |
585 | void add(UniquedStringImpl* key, Entry&& entry) |
586 | { |
587 | ConcurrentJSLocker locker(m_lock); |
588 | add(locker, key, std::forward<Entry>(entry)); |
589 | } |
590 | |
591 | template<typename Entry> |
592 | void set(const ConcurrentJSLocker&, UniquedStringImpl* key, Entry&& entry) |
593 | { |
594 | RELEASE_ASSERT(!m_localToEntry); |
595 | didUseVarOffset(entry.varOffset()); |
596 | m_map.set(key, std::forward<Entry>(entry)); |
597 | } |
598 | |
599 | template<typename Entry> |
600 | void set(UniquedStringImpl* key, Entry&& entry) |
601 | { |
602 | ConcurrentJSLocker locker(m_lock); |
603 | set(locker, key, std::forward<Entry>(entry)); |
604 | } |
605 | |
606 | bool contains(const ConcurrentJSLocker&, UniquedStringImpl* key) |
607 | { |
608 | return m_map.contains(key); |
609 | } |
610 | |
611 | bool contains(UniquedStringImpl* key) |
612 | { |
613 | ConcurrentJSLocker locker(m_lock); |
614 | return contains(locker, key); |
615 | } |
616 | |
617 | // The principle behind ScopedArgumentsTable modifications is that we will create one and |
618 | // leave it unlocked - thereby allowing in-place changes - until someone asks for a pointer to |
619 | // the table. Then, we will lock it. Then both our future changes and their future changes |
620 | // will first have to make a copy. This discipline means that usually when we create a |
621 | // ScopedArguments object, we don't have to make a copy of the ScopedArgumentsTable - instead |
622 | // we just take a reference to one that we already have. |
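    //
    // For example (illustrative):
    //
    //     symbolTable->setArgumentsLength(vm, 2);
    //     symbolTable->setArgumentOffset(vm, 0, ScopeOffset(0)); // edits in place: table not yet locked
    //     ScopedArgumentsTable* shared = symbolTable->arguments(); // locks the table
    //     symbolTable->setArgumentOffset(vm, 1, ScopeOffset(1)); // copies first; `shared` is unaffected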
623 | |
624 | uint32_t argumentsLength() const |
625 | { |
626 | if (!m_arguments) |
627 | return 0; |
628 | return m_arguments->length(); |
629 | } |
630 | |
631 | void setArgumentsLength(VM& vm, uint32_t length) |
632 | { |
633 | if (UNLIKELY(!m_arguments)) |
634 | m_arguments.set(vm, this, ScopedArgumentsTable::create(vm, length)); |
635 | else |
636 | m_arguments.set(vm, this, m_arguments->setLength(vm, length)); |
637 | } |
638 | |
639 | ScopeOffset argumentOffset(uint32_t i) const |
640 | { |
641 | ASSERT_WITH_SECURITY_IMPLICATION(m_arguments); |
642 | return m_arguments->get(i); |
643 | } |
644 | |
645 | void setArgumentOffset(VM& vm, uint32_t i, ScopeOffset offset) |
646 | { |
647 | ASSERT_WITH_SECURITY_IMPLICATION(m_arguments); |
648 | m_arguments.set(vm, this, m_arguments->set(vm, i, offset)); |
649 | } |
650 | |
651 | ScopedArgumentsTable* arguments() const |
652 | { |
653 | if (!m_arguments) |
654 | return nullptr; |
655 | m_arguments->lock(); |
656 | return m_arguments.get(); |
657 | } |
658 | |
659 | const LocalToEntryVec& localToEntry(const ConcurrentJSLocker&); |
660 | SymbolTableEntry* entryFor(const ConcurrentJSLocker&, ScopeOffset); |
661 | |
662 | GlobalVariableID uniqueIDForVariable(const ConcurrentJSLocker&, UniquedStringImpl* key, VM&); |
663 | GlobalVariableID uniqueIDForOffset(const ConcurrentJSLocker&, VarOffset, VM&); |
664 | RefPtr<TypeSet> globalTypeSetForOffset(const ConcurrentJSLocker&, VarOffset, VM&); |
665 | RefPtr<TypeSet> globalTypeSetForVariable(const ConcurrentJSLocker&, UniquedStringImpl* key, VM&); |
666 | |
667 | bool usesNonStrictEval() const { return m_usesNonStrictEval; } |
668 | void setUsesNonStrictEval(bool usesNonStrictEval) { m_usesNonStrictEval = usesNonStrictEval; } |
669 | |
670 | bool isNestedLexicalScope() const { return m_nestedLexicalScope; } |
671 | void markIsNestedLexicalScope() { ASSERT(scopeType() == LexicalScope); m_nestedLexicalScope = true; } |
672 | |
673 | enum ScopeType { |
674 | VarScope, |
675 | GlobalLexicalScope, |
676 | LexicalScope, |
677 | CatchScope, |
678 | FunctionNameScope |
679 | }; |
680 | void setScopeType(ScopeType type) { m_scopeType = type; } |
681 | ScopeType scopeType() const { return static_cast<ScopeType>(m_scopeType); } |
682 | |
683 | SymbolTable* cloneScopePart(VM&); |
684 | |
685 | void prepareForTypeProfiling(const ConcurrentJSLocker&); |
686 | |
687 | CodeBlock* rareDataCodeBlock(); |
688 | void setRareDataCodeBlock(CodeBlock*); |
689 | |
690 | InferredValue* singletonScope() { return m_singletonScope.get(); } |
691 | |
692 | static void visitChildren(JSCell*, SlotVisitor&); |
693 | |
694 | DECLARE_EXPORT_INFO; |
695 | |
696 | private: |
697 | JS_EXPORT_PRIVATE SymbolTable(VM&); |
698 | ~SymbolTable(); |
699 | |
700 | JS_EXPORT_PRIVATE void finishCreation(VM&); |
701 | |
702 | Map m_map; |
703 | ScopeOffset m_maxScopeOffset; |
704 | public: |
705 | mutable ConcurrentJSLock m_lock; |
706 | private: |
707 | unsigned m_usesNonStrictEval : 1; |
708 | unsigned m_nestedLexicalScope : 1; // Non-function LexicalScope. |
709 | unsigned m_scopeType : 3; // ScopeType |
710 | |
711 | struct SymbolTableRareData { |
712 | UniqueIDMap m_uniqueIDMap; |
713 | OffsetToVariableMap m_offsetToVariableMap; |
714 | UniqueTypeSetMap m_uniqueTypeSetMap; |
715 | WriteBarrier<CodeBlock> m_codeBlock; |
716 | }; |
717 | std::unique_ptr<SymbolTableRareData> m_rareData; |
718 | |
719 | WriteBarrier<ScopedArgumentsTable> m_arguments; |
720 | WriteBarrier<InferredValue> m_singletonScope; |
721 | |
722 | std::unique_ptr<LocalToEntryVec> m_localToEntry; |
723 | }; |
724 | |
725 | } // namespace JSC |
726 | |