/*
 * Copyright (C) 2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "Instruction.h"
#include <cstring>
#include <memory>
#include <utility>
#include <wtf/Vector.h>

namespace JSC {

struct Instruction;

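// An InstructionStream is a contiguous byte buffer of encoded, variable-length
// bytecode instructions. Instructions are addressed by byte offset (Offset) and
// traversed with Ref cursors that advance by each instruction's encoded size.
// The read-only stream is produced by InstructionStreamWriter below.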
class InstructionStream {
    WTF_MAKE_FAST_ALLOCATED;

    using InstructionBuffer = Vector<uint8_t, 0, UnsafeVectorOverflow>;

    friend class InstructionStreamWriter;
    friend class CachedInstructionStream;
public:
    size_t sizeInBytes() const;

    using Offset = unsigned;

private:
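    // BaseRef is a lightweight cursor into the stream: a reference to the backing
    // buffer plus a byte offset. unwrap() re-derives the Instruction pointer from
    // the offset on each access, so no raw pointer is cached across buffer
    // reallocations.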
    template<class InstructionBuffer>
    class BaseRef {
        WTF_MAKE_FAST_ALLOCATED;

        friend class InstructionStream;

    public:
        BaseRef(const BaseRef<InstructionBuffer>& other)
            : m_instructions(other.m_instructions)
            , m_index(other.m_index)
        { }

        void operator=(const BaseRef<InstructionBuffer>& other)
        {
            m_instructions = other.m_instructions;
            m_index = other.m_index;
        }

        inline const Instruction* operator->() const { return unwrap(); }
        inline const Instruction* ptr() const { return unwrap(); }

        bool operator!=(const BaseRef<InstructionBuffer>& other) const
        {
            return &m_instructions != &other.m_instructions || m_index != other.m_index;
        }

        BaseRef next() const
        {
            return BaseRef { m_instructions, m_index + ptr()->size() };
        }

        inline Offset offset() const
        {
            return m_index;
        }

        bool isValid() const
        {
            return m_index < m_instructions.size();
        }

    private:
        inline const Instruction* unwrap() const { return reinterpret_cast<const Instruction*>(&m_instructions[m_index]); }

    protected:
        BaseRef(InstructionBuffer& instructions, size_t index)
            : m_instructions(instructions)
            , m_index(index)
        { }

        InstructionBuffer& m_instructions;
        Offset m_index;
    };

public:
    using Ref = BaseRef<const InstructionBuffer>;

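    // MutableRef is the writable counterpart handed out by InstructionStreamWriter;
    // freeze() (or the implicit conversion) yields a read-only Ref at the same offset.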
    class MutableRef : public BaseRef<InstructionBuffer> {
        friend class InstructionStreamWriter;

    protected:
        using BaseRef<InstructionBuffer>::BaseRef;

    public:
        Ref freeze() const { return Ref { m_instructions, m_index }; }
        inline Instruction* operator->() { return unwrap(); }
        inline Instruction* ptr() { return unwrap(); }
        inline operator Ref()
        {
            return Ref { m_instructions, m_index };
        }

    private:
        inline Instruction* unwrap() { return reinterpret_cast<Instruction*>(&m_instructions[m_index]); }
    };

private:
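    // A forward iterator over instruction boundaries: operator++ advances by the
    // size of the current instruction, so begin()/end() visit each instruction once.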
    class iterator : public Ref {
        friend class InstructionStream;

    public:
        using Ref::Ref;

        Ref& operator*()
        {
            return *this;
        }

        iterator operator++()
        {
            m_index += ptr()->size();
            return *this;
        }
    };

public:
    inline iterator begin() const
    {
        return iterator { m_instructions, 0 };
    }

    inline iterator end() const
    {
        return iterator { m_instructions, m_instructions.size() };
    }

    inline const Ref at(Offset offset) const
    {
        ASSERT(offset < m_instructions.size());
        return Ref { m_instructions, offset };
    }

    inline size_t size() const
    {
        return m_instructions.size();
    }

    const void* rawPointer() const
    {
        return m_instructions.data();
    }

    bool contains(Instruction*) const;

protected:
    explicit InstructionStream(InstructionBuffer&&);

    InstructionBuffer m_instructions;
};

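// InstructionStreamWriter is the mutable builder for an InstructionStream: bytes
// are written at a cursor position (seek()/write()), refs into the partially
// built stream can be taken and rewound, and finalize() moves the buffer into an
// immutable InstructionStream.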
class InstructionStreamWriter : public InstructionStream {
    friend class BytecodeRewriter;
public:
    InstructionStreamWriter()
        : InstructionStream({ })
    { }

    inline MutableRef ref(Offset offset)
    {
        ASSERT(offset < m_instructions.size());
        return MutableRef { m_instructions, offset };
    }

    void seek(unsigned position)
    {
        ASSERT(position <= m_instructions.size());
        m_position = position;
    }

    unsigned position()
    {
        return m_position;
    }

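    // write() overwrites in place while the cursor is still within the existing
    // buffer (e.g. after seek()) and appends once it reaches the end.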
    void write(uint8_t byte)
    {
        ASSERT(!m_finalized);
        if (m_position < m_instructions.size())
            m_instructions[m_position++] = byte;
        else {
            m_instructions.append(byte);
            m_position++;
        }
    }

    void write(uint32_t i)
    {
        ASSERT(!m_finalized);
        uint8_t bytes[4];
        std::memcpy(bytes, &i, sizeof(i));

        // Though not always obvious, we don't have to invert the order of the
        // bytes written here for CPU(BIG_ENDIAN). This is because the incoming
        // i value is already in big-endian byte order on CPU(BIG_ENDIAN) platforms.
        write(bytes[0]);
        write(bytes[1]);
        write(bytes[2]);
        write(bytes[3]);
    }

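    // Truncates the stream back to ref's offset, discarding everything written
    // after it, and moves the write cursor there.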
    void rewind(MutableRef& ref)
    {
        ASSERT(ref.offset() < m_instructions.size());
        m_instructions.shrink(ref.offset());
        m_position = ref.offset();
    }

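    // Seals the writer: the buffer is shrunk to fit and moved into a new
    // InstructionStream. write() asserts if called after this.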
    std::unique_ptr<InstructionStream> finalize()
    {
        m_finalized = true;
        m_instructions.shrinkToFit();
        return std::unique_ptr<InstructionStream> { new InstructionStream(WTFMove(m_instructions)) };
    }

    MutableRef ref()
    {
        return MutableRef { m_instructions, m_position };
    }

    void swap(InstructionStreamWriter& other)
    {
        std::swap(m_finalized, other.m_finalized);
        std::swap(m_position, other.m_position);
        m_instructions.swap(other.m_instructions);
    }

private:
    class iterator : public MutableRef {
        friend class InstructionStreamWriter;

    protected:
        using MutableRef::MutableRef;

    public:
        MutableRef& operator*()
        {
            return *this;
        }

        iterator operator++()
        {
            m_index += ptr()->size();
            return *this;
        }
    };

public:
    iterator begin()
    {
        return iterator { m_instructions, 0 };
    }

    iterator end()
    {
        return iterator { m_instructions, m_instructions.size() };
    }

private:
    unsigned m_position { 0 };
    bool m_finalized { false };
};

} // namespace JSC