Absolute File Name: | /home/opencoverage/opencoverage/guest-scripts/qtdeclarative/src/qtdeclarative/src/3rdparty/masm/assembler/MacroAssembler.h |
Source code | Switch to Preprocessed file |
Line | Source | Count |
---|---|---|
1 | /* | - |
2 | * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. | - |
3 | * | - |
4 | * Redistribution and use in source and binary forms, with or without | - |
5 | * modification, are permitted provided that the following conditions | - |
6 | * are met: | - |
7 | * 1. Redistributions of source code must retain the above copyright | - |
8 | * notice, this list of conditions and the following disclaimer. | - |
9 | * 2. Redistributions in binary form must reproduce the above copyright | - |
10 | * notice, this list of conditions and the following disclaimer in the | - |
11 | * documentation and/or other materials provided with the distribution. | - |
12 | * | - |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY | - |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | - |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | - |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR | - |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | - |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | - |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | - |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY | - |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | - |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | - |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | - |
24 | */ | - |
25 | - | |
26 | #ifndef MacroAssembler_h | - |
27 | #define MacroAssembler_h | - |
28 | - | |
29 | #include <wtf/Platform.h> | - |
30 | - | |
31 | #if ENABLE(ASSEMBLER) | - |
32 | - | |
33 | #include "MacroAssemblerARMv7.h" | - |
34 | #include "MacroAssemblerARM64.h" | - |
35 | - | |
36 | #if CPU(ARM_THUMB2) | - |
37 | namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }; | - |
38 | - | |
39 | #elif CPU(ARM64) | - |
40 | namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; }; | - |
41 | - | |
42 | #elif CPU(ARM_TRADITIONAL) | - |
43 | #include "MacroAssemblerARM.h" | - |
44 | namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; }; | - |
45 | - | |
46 | #elif CPU(MIPS) | - |
47 | #include "MacroAssemblerMIPS.h" | - |
48 | namespace JSC { | - |
49 | typedef MacroAssemblerMIPS MacroAssemblerBase; | - |
50 | }; | - |
51 | - | |
52 | #elif CPU(X86) | - |
53 | #include "MacroAssemblerX86.h" | - |
54 | namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; }; | - |
55 | - | |
56 | #elif CPU(X86_64) | - |
57 | #include "MacroAssemblerX86_64.h" | - |
58 | namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; }; | - |
59 | - | |
60 | #elif CPU(SH4) | - |
61 | #include "MacroAssemblerSH4.h" | - |
62 | namespace JSC { | - |
63 | typedef MacroAssemblerSH4 MacroAssemblerBase; | - |
64 | }; | - |
65 | - | |
66 | #else | - |
67 | #error "The MacroAssembler is not supported on this platform." | - |
68 | #endif | - |
69 | - | |
70 | namespace JSC { | - |
71 | - | |
72 | template <typename MacroAssemblerBase> | - |
73 | class MacroAssembler : public MacroAssemblerBase { | - |
74 | public: | - |
75 | - | |
76 | using DoubleCondition = typename MacroAssemblerBase::DoubleCondition; | - |
77 | using ResultCondition = typename MacroAssemblerBase::ResultCondition; | - |
78 | using RelationalCondition = typename MacroAssemblerBase::RelationalCondition; | - |
79 | using RegisterID = typename MacroAssemblerBase::RegisterID; | - |
80 | using Address = typename MacroAssemblerBase::Address; | - |
81 | using ExtendedAddress = typename MacroAssemblerBase::ExtendedAddress; | - |
82 | using BaseIndex = typename MacroAssemblerBase::BaseIndex; | - |
83 | using ImplicitAddress = typename MacroAssemblerBase::ImplicitAddress; | - |
84 | using AbsoluteAddress = typename MacroAssemblerBase::AbsoluteAddress; | - |
85 | using TrustedImm32 = typename MacroAssemblerBase::TrustedImm32; | - |
86 | using TrustedImm64 = typename MacroAssemblerBase::TrustedImm64; | - |
87 | using TrustedImmPtr = typename MacroAssemblerBase::TrustedImmPtr; | - |
88 | using Imm32 = typename MacroAssemblerBase::Imm32; | - |
89 | using Imm64 = typename MacroAssemblerBase::Imm64; | - |
90 | using ImmPtr = typename MacroAssemblerBase::ImmPtr; | - |
91 | using Label = typename MacroAssemblerBase::Label; | - |
92 | using DataLabelPtr = typename MacroAssemblerBase::DataLabelPtr; | - |
93 | using DataLabel32 = typename MacroAssemblerBase::DataLabel32; | - |
94 | using DataLabelCompact = typename MacroAssemblerBase::DataLabelCompact; | - |
95 | using Jump = typename MacroAssemblerBase::Jump; | - |
96 | using PatchableJump = typename MacroAssemblerBase::PatchableJump; | - |
97 | using MacroAssemblerBase::PointerSize; | - |
98 | - | |
99 | using MacroAssemblerBase::pop; | - |
100 | using MacroAssemblerBase::jump; | - |
101 | using MacroAssemblerBase::branch32; | - |
102 | using MacroAssemblerBase::move; | - |
103 | using MacroAssemblerBase::store32; | - |
104 | using MacroAssemblerBase::add32; | - |
105 | using MacroAssemblerBase::xor32; | - |
106 | using MacroAssemblerBase::sub32; | - |
107 | using MacroAssemblerBase::load32; | - |
108 | - | |
109 | - | |
110 | #if defined(V4_BOOTSTRAP) | - |
111 | using MacroAssemblerBase::loadPtr; | - |
112 | using MacroAssemblerBase::storePtr; | - |
113 | #elif CPU(X86_64) || CPU(ARM64) | - |
114 | using MacroAssemblerBase::add64; | - |
115 | using MacroAssemblerBase::sub64; | - |
116 | using MacroAssemblerBase::xor64; | - |
117 | using MacroAssemblerBase::load64; | - |
118 | using MacroAssemblerBase::store64; | - |
119 | #endif | - |
120 | - | |
121 | #if ENABLE(JIT_CONSTANT_BLINDING) | - |
122 | using MacroAssemblerBase::add32; | - |
123 | using MacroAssemblerBase::and32; | - |
124 | using MacroAssemblerBase::branchAdd32; | - |
125 | using MacroAssemblerBase::branchMul32; | - |
126 | using MacroAssemblerBase::branchSub32; | - |
127 | using MacroAssemblerBase::lshift32; | - |
128 | using MacroAssemblerBase::or32; | - |
129 | using MacroAssemblerBase::rshift32; | - |
130 | using MacroAssemblerBase::store32; | - |
131 | using MacroAssemblerBase::sub32; | - |
132 | using MacroAssemblerBase::urshift32; | - |
133 | using MacroAssemblerBase::xor32; | - |
134 | #endif | - |
135 | - | |
136 | static const double twoToThe32; // This is super useful for some double code. | - |
137 | - | |
138 | // Utilities used by the DFG JIT. | - |
139 | #if ENABLE(DFG_JIT) || ENABLE(DFG_JIT_UTILITY_METHODS) | - |
140 | using MacroAssemblerBase::invert; | - |
141 | - | |
142 | static DoubleCondition invert(DoubleCondition cond) | - |
143 | { | - |
144 | switch (cond) { | - |
145 | case DoubleCondition::DoubleEqual: never executed: case DoubleCondition::DoubleEqual: | 0 |
146 | return DoubleCondition::DoubleNotEqualOrUnordered; never executed: return DoubleCondition::DoubleNotEqualOrUnordered; | 0 |
147 | case DoubleCondition::DoubleNotEqual: never executed: case DoubleCondition::DoubleNotEqual: | 0 |
148 | return DoubleCondition::DoubleEqualOrUnordered; never executed: return DoubleCondition::DoubleEqualOrUnordered; | 0 |
149 | case DoubleCondition::DoubleGreaterThan: never executed: case DoubleCondition::DoubleGreaterThan: | 0 |
150 | return DoubleCondition::DoubleLessThanOrEqualOrUnordered; never executed: return DoubleCondition::DoubleLessThanOrEqualOrUnordered; | 0 |
151 | case DoubleCondition::DoubleGreaterThanOrEqual: never executed: case DoubleCondition::DoubleGreaterThanOrEqual: | 0 |
152 | return DoubleCondition::DoubleLessThanOrUnordered; never executed: return DoubleCondition::DoubleLessThanOrUnordered; | 0 |
153 | case DoubleCondition::DoubleLessThan: never executed: case DoubleCondition::DoubleLessThan: | 0 |
154 | return DoubleCondition::DoubleGreaterThanOrEqualOrUnordered; never executed: return DoubleCondition::DoubleGreaterThanOrEqualOrUnordered; | 0 |
155 | case DoubleCondition::DoubleLessThanOrEqual: never executed: case DoubleCondition::DoubleLessThanOrEqual: | 0 |
156 | return DoubleCondition::DoubleGreaterThanOrUnordered; never executed: return DoubleCondition::DoubleGreaterThanOrUnordered; | 0 |
157 | case DoubleCondition::DoubleEqualOrUnordered: never executed: case DoubleCondition::DoubleEqualOrUnordered: | 0 |
158 | return DoubleCondition::DoubleNotEqual; never executed: return DoubleCondition::DoubleNotEqual; | 0 |
159 | case DoubleCondition::DoubleNotEqualOrUnordered: never executed: case DoubleCondition::DoubleNotEqualOrUnordered: | 0 |
160 | return DoubleCondition::DoubleEqual; never executed: return DoubleCondition::DoubleEqual; | 0 |
161 | case DoubleCondition::DoubleGreaterThanOrUnordered: never executed: case DoubleCondition::DoubleGreaterThanOrUnordered: | 0 |
162 | return DoubleCondition::DoubleLessThanOrEqual; never executed: return DoubleCondition::DoubleLessThanOrEqual; | 0 |
163 | case DoubleCondition::DoubleGreaterThanOrEqualOrUnordered: never executed: case DoubleCondition::DoubleGreaterThanOrEqualOrUnordered: | 0 |
164 | return DoubleCondition::DoubleLessThan; never executed: return DoubleCondition::DoubleLessThan; | 0 |
165 | case DoubleCondition::DoubleLessThanOrUnordered: never executed: case DoubleCondition::DoubleLessThanOrUnordered: | 0 |
166 | return DoubleCondition::DoubleGreaterThanOrEqual; never executed: return DoubleCondition::DoubleGreaterThanOrEqual; | 0 |
167 | case DoubleCondition::DoubleLessThanOrEqualOrUnordered: never executed: case DoubleCondition::DoubleLessThanOrEqualOrUnordered: | 0 |
168 | return DoubleCondition::DoubleGreaterThan; never executed: return DoubleCondition::DoubleGreaterThan; | 0 |
169 | default: never executed: default: | 0 |
170 | RELEASE_ASSERT_NOT_REACHED(); | - |
171 | return DoubleCondition::DoubleEqual; // make compiler happy never executed: return DoubleCondition::DoubleEqual; | 0 |
172 | } | - |
173 | } | - |
174 | - | |
175 | static bool isInvertible(ResultCondition cond) | - |
176 | { | - |
177 | switch (cond) { | - |
178 | case ResultCondition::Zero: never executed: case ResultCondition::Zero: | 0 |
179 | case ResultCondition::NonZero: never executed: case ResultCondition::NonZero: | 0 |
180 | return true; never executed: return true; | 0 |
181 | default: never executed: default: | 0 |
182 | return false; never executed: return false; | 0 |
183 | } | - |
184 | } | - |
185 | - | |
186 | static ResultCondition invert(ResultCondition cond) | - |
187 | { | - |
188 | switch (cond) { | - |
189 | case ResultCondition::Zero: never executed: case ResultCondition::Zero: | 0 |
190 | return ResultCondition::NonZero; never executed: return ResultCondition::NonZero; | 0 |
191 | case ResultCondition::NonZero: never executed: case ResultCondition::NonZero: | 0 |
192 | return ResultCondition::Zero; never executed: return ResultCondition::Zero; | 0 |
193 | default: never executed: default: | 0 |
194 | RELEASE_ASSERT_NOT_REACHED(); | - |
195 | return ResultCondition::Zero; // Make compiler happy for release builds. never executed: return ResultCondition::Zero; | 0 |
196 | } | - |
197 | } | - |
198 | #endif | - |
199 | - | |
200 | // Platform agnostic convenience functions, | - |
201 | // described in terms of other macro assembly methods. | - |
202 | void pop() | - |
203 | { | - |
204 | addPtr(TrustedImm32(PointerSize), MacroAssemblerBase::stackPointerRegister); | - |
205 | } executed 13188 times by 64 tests: end of block Executed by:
| 13188 |
206 | - | |
207 | void peek(RegisterID dest, int index = 0) | - |
208 | { | - |
209 | loadPtr(Address(MacroAssemblerBase::stackPointerRegister, (index * PointerSize)), dest); | - |
210 | } executed 2274 times by 8 tests: end of block Executed by:
| 2274 |
211 | - | |
212 | Address addressForPoke(int index) | - |
213 | { | - |
214 | return Address(MacroAssemblerBase::stackPointerRegister, (index * PointerSize)); executed 2542 times by 8 tests: return Address(MacroAssemblerBase::stackPointerRegister, (index * PointerSize)); Executed by:
| 2542 |
215 | } | - |
216 | - | |
217 | void poke(RegisterID src, int index = 0) | - |
218 | { | - |
219 | storePtr(src, addressForPoke(index)); | - |
220 | } executed 2414 times by 8 tests: end of block Executed by:
| 2414 |
221 | - | |
222 | void poke(TrustedImm32 value, int index = 0) | - |
223 | { | - |
224 | store32(value, addressForPoke(index)); | - |
225 | } executed 130 times by 2 tests: end of block Executed by:
| 130 |
226 | - | |
227 | #if !defined(V4_BOOTSTRAP) | - |
228 | void poke(TrustedImmPtr imm, int index = 0) | - |
229 | { | - |
230 | storePtr(imm, addressForPoke(index)); | - |
231 | } never executed: end of block | 0 |
232 | #endif | - |
233 | - | |
234 | #if (CPU(X86_64) || CPU(ARM64)) && !defined(V4_BOOTSTRAP) | - |
235 | void peek64(RegisterID dest, int index = 0) | - |
236 | { | - |
237 | load64(Address(MacroAssemblerBase::stackPointerRegister, (index * sizeof(void*))), dest); | - |
238 | } never executed: end of block | 0 |
239 | - | |
240 | void poke(TrustedImm64 value, int index = 0) | - |
241 | { | - |
242 | store64(value, addressForPoke(index)); | - |
243 | } never executed: end of block | 0 |
244 | - | |
245 | void poke64(RegisterID src, int index = 0) | - |
246 | { | - |
247 | store64(src, addressForPoke(index)); | - |
248 | } never executed: end of block | 0 |
249 | #endif | - |
250 | - | |
251 | // Backwards branches, these are currently all implemented using existing forwards branch mechanisms. | - |
252 | void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target) | - |
253 | { | - |
254 | branchPtr(cond, op1, imm).linkTo(target, this); | - |
255 | } never executed: end of block | 0 |
256 | void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target) | - |
257 | { | - |
258 | branchPtr(cond, op1, imm).linkTo(target, this); | - |
259 | } never executed: end of block | 0 |
260 | - | |
261 | void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target) | - |
262 | { | - |
263 | branch32(cond, op1, op2).linkTo(target, this); | - |
264 | } never executed: end of block | 0 |
265 | - | |
266 | void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target) | - |
267 | { | - |
268 | branch32(cond, op1, imm).linkTo(target, this); | - |
269 | } never executed: end of block | 0 |
270 | - | |
271 | void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target) | - |
272 | { | - |
273 | branch32(cond, op1, imm).linkTo(target, this); | - |
274 | } never executed: end of block | 0 |
275 | - | |
276 | void branch32(RelationalCondition cond, RegisterID left, Address right, Label target) | - |
277 | { | - |
278 | branch32(cond, left, right).linkTo(target, this); | - |
279 | } never executed: end of block | 0 |
280 | - | |
281 | Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right) | - |
282 | { | - |
283 | return branch32(commute(cond), right, left); executed 23816 times by 50 tests: return branch32(commute(cond), right, left); Executed by:
| 23816 |
284 | } | - |
285 | - | |
286 | Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right) | - |
287 | { | - |
288 | return branch32(commute(cond), right, left); never executed: return branch32(commute(cond), right, left); | 0 |
289 | } | - |
290 | - | |
291 | void branchTestPtr(ResultCondition cond, RegisterID reg, Label target) | - |
292 | { | - |
293 | branchTestPtr(cond, reg).linkTo(target, this); | - |
294 | } never executed: end of block | 0 |
295 | - | |
296 | #if !CPU(ARM_THUMB2) && !CPU(ARM64) | - |
297 | PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0)) | - |
298 | { | - |
299 | return PatchableJump(branchPtr(cond, left, right)); never executed: return PatchableJump(branchPtr(cond, left, right)); | 0 |
300 | } | - |
301 | - | |
302 | PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) | - |
303 | { | - |
304 | return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue)); never executed: return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue)); | 0 |
305 | } | - |
306 | - | |
307 | PatchableJump patchableJump() | - |
308 | { | - |
309 | return PatchableJump(jump()); never executed: return PatchableJump(jump()); | 0 |
310 | } | - |
311 | - | |
312 | PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) | - |
313 | { | - |
314 | return PatchableJump(branchTest32(cond, reg, mask)); never executed: return PatchableJump(branchTest32(cond, reg, mask)); | 0 |
315 | } | - |
316 | #endif // !CPU(ARM_THUMB2) && !CPU(ARM64) | - |
317 | - | |
318 | #if !CPU(ARM) | - |
319 | PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm) | - |
320 | { | - |
321 | return PatchableJump(branch32(cond, reg, imm)); never executed: return PatchableJump(branch32(cond, reg, imm)); | 0 |
322 | } | - |
323 | #endif // !CPU(ARM) | - |
324 | - | |
325 | void jump(Label target) | - |
326 | { | - |
327 | jump().linkTo(target, this); | - |
328 | } executed 1165267 times by 153 tests: end of block Executed by:
| 1165267 |
329 | - | |
330 | // Commute a relational condition, returns a new condition that will produce | - |
331 | // the same results given the same inputs but with their positions exchanged. | - |
332 | static RelationalCondition commute(RelationalCondition condition) | - |
333 | { | - |
334 | switch (condition) { | - |
335 | case RelationalCondition::Above: never executed: case RelationalCondition::Above: | 0 |
336 | return RelationalCondition::Below; never executed: return RelationalCondition::Below; | 0 |
337 | case RelationalCondition::AboveOrEqual: never executed: case RelationalCondition::AboveOrEqual: | 0 |
338 | return RelationalCondition::BelowOrEqual; never executed: return RelationalCondition::BelowOrEqual; | 0 |
339 | case RelationalCondition::Below: never executed: case RelationalCondition::Below: | 0 |
340 | return RelationalCondition::Above; never executed: return RelationalCondition::Above; | 0 |
341 | case RelationalCondition::BelowOrEqual: never executed: case RelationalCondition::BelowOrEqual: | 0 |
342 | return RelationalCondition::AboveOrEqual; never executed: return RelationalCondition::AboveOrEqual; | 0 |
343 | case RelationalCondition::GreaterThan: never executed: case RelationalCondition::GreaterThan: | 0 |
344 | return RelationalCondition::LessThan; never executed: return RelationalCondition::LessThan; | 0 |
345 | case RelationalCondition::GreaterThanOrEqual: never executed: case RelationalCondition::GreaterThanOrEqual: | 0 |
346 | return RelationalCondition::LessThanOrEqual; never executed: return RelationalCondition::LessThanOrEqual; | 0 |
347 | case RelationalCondition::LessThan: never executed: case RelationalCondition::LessThan: | 0 |
348 | return RelationalCondition::GreaterThan; never executed: return RelationalCondition::GreaterThan; | 0 |
349 | case RelationalCondition::LessThanOrEqual: never executed: case RelationalCondition::LessThanOrEqual: | 0 |
350 | return RelationalCondition::GreaterThanOrEqual; never executed: return RelationalCondition::GreaterThanOrEqual; | 0 |
351 | default: executed 23816 times by 50 tests: default: Executed by:
| 23816 |
352 | break; executed 23816 times by 50 tests: break; Executed by:
| 23816 |
353 | } | - |
354 | - | |
355 | ASSERT(condition == RelationalCondition::Equal || condition == RelationalCondition::NotEqual); | - |
356 | return condition; executed 23816 times by 50 tests: return condition; Executed by:
| 23816 |
357 | } | - |
358 | - | |
359 | static const unsigned BlindingModulus = 64; | - |
360 | bool shouldConsiderBlinding() | - |
361 | { | - |
362 | return !(this->random() & (BlindingModulus - 1)); never executed: return !(this->random() & (BlindingModulus - 1)); | 0 |
363 | } | - |
364 | - | |
365 | #if !defined(V4_BOOTSTRAP) | - |
366 | // Ptr methods | - |
367 | // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents. | - |
368 | // FIXME: should this use a test for 32-bitness instead of this specific exception? | - |
369 | #if !CPU(X86_64) && !CPU(ARM64) | - |
370 | void addPtr(Address src, RegisterID dest) | - |
371 | { | - |
372 | add32(src, dest); | - |
373 | } | - |
374 | - | |
375 | void addPtr(AbsoluteAddress src, RegisterID dest) | - |
376 | { | - |
377 | add32(src, dest); | - |
378 | } | - |
379 | - | |
380 | void addPtr(RegisterID src, RegisterID dest) | - |
381 | { | - |
382 | add32(src, dest); | - |
383 | } | - |
384 | - | |
385 | void addPtr(TrustedImm32 imm, RegisterID srcDest) | - |
386 | { | - |
387 | add32(imm, srcDest); | - |
388 | } | - |
389 | - | |
390 | void addPtr(TrustedImmPtr imm, RegisterID dest) | - |
391 | { | - |
392 | add32(TrustedImm32(imm), dest); | - |
393 | } | - |
394 | - | |
395 | void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest) | - |
396 | { | - |
397 | add32(imm, src, dest); | - |
398 | } | - |
399 | - | |
400 | void addPtr(TrustedImm32 imm, AbsoluteAddress address) | - |
401 | { | - |
402 | add32(imm, address); | - |
403 | } | - |
404 | - | |
405 | void andPtr(RegisterID src, RegisterID dest) | - |
406 | { | - |
407 | and32(src, dest); | - |
408 | } | - |
409 | - | |
410 | void andPtr(TrustedImm32 imm, RegisterID srcDest) | - |
411 | { | - |
412 | and32(imm, srcDest); | - |
413 | } | - |
414 | - | |
415 | void negPtr(RegisterID dest) | - |
416 | { | - |
417 | neg32(dest); | - |
418 | } | - |
419 | - | |
420 | void orPtr(RegisterID src, RegisterID dest) | - |
421 | { | - |
422 | or32(src, dest); | - |
423 | } | - |
424 | - | |
425 | void orPtr(RegisterID op1, RegisterID op2, RegisterID dest) | - |
426 | { | - |
427 | or32(op1, op2, dest); | - |
428 | } | - |
429 | - | |
430 | void orPtr(TrustedImmPtr imm, RegisterID dest) | - |
431 | { | - |
432 | or32(TrustedImm32(imm), dest); | - |
433 | } | - |
434 | - | |
435 | void orPtr(TrustedImm32 imm, RegisterID dest) | - |
436 | { | - |
437 | or32(imm, dest); | - |
438 | } | - |
439 | - | |
440 | void subPtr(RegisterID src, RegisterID dest) | - |
441 | { | - |
442 | sub32(src, dest); | - |
443 | } | - |
444 | - | |
445 | void subPtr(TrustedImm32 imm, RegisterID dest) | - |
446 | { | - |
447 | sub32(imm, dest); | - |
448 | } | - |
449 | - | |
450 | void subPtr(TrustedImmPtr imm, RegisterID dest) | - |
451 | { | - |
452 | sub32(TrustedImm32(imm), dest); | - |
453 | } | - |
454 | - | |
455 | void xorPtr(RegisterID src, RegisterID dest) | - |
456 | { | - |
457 | xor32(src, dest); | - |
458 | } | - |
459 | - | |
460 | void xorPtr(TrustedImm32 imm, RegisterID srcDest) | - |
461 | { | - |
462 | xor32(imm, srcDest); | - |
463 | } | - |
464 | - | |
465 | - | |
466 | void loadPtr(ImplicitAddress address, RegisterID dest) | - |
467 | { | - |
468 | load32(address, dest); | - |
469 | } | - |
470 | - | |
471 | void loadPtr(BaseIndex address, RegisterID dest) | - |
472 | { | - |
473 | load32(address, dest); | - |
474 | } | - |
475 | - | |
476 | void loadPtr(const void* address, RegisterID dest) | - |
477 | { | - |
478 | load32(address, dest); | - |
479 | } | - |
480 | - | |
481 | DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest) | - |
482 | { | - |
483 | return load32WithAddressOffsetPatch(address, dest); | - |
484 | } | - |
485 | - | |
486 | DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest) | - |
487 | { | - |
488 | return load32WithCompactAddressOffsetPatch(address, dest); | - |
489 | } | - |
490 | - | |
491 | void move(ImmPtr imm, RegisterID dest) | - |
492 | { | - |
493 | move(Imm32(imm.asTrustedImmPtr()), dest); | - |
494 | } | - |
495 | - | |
496 | void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) | - |
497 | { | - |
498 | compare32(cond, left, right, dest); | - |
499 | } | - |
500 | - | |
501 | void storePtr(RegisterID src, ImplicitAddress address) | - |
502 | { | - |
503 | store32(src, address); | - |
504 | } | - |
505 | - | |
506 | void storePtr(RegisterID src, BaseIndex address) | - |
507 | { | - |
508 | store32(src, address); | - |
509 | } | - |
510 | - | |
511 | void storePtr(RegisterID src, void* address) | - |
512 | { | - |
513 | store32(src, address); | - |
514 | } | - |
515 | - | |
516 | void storePtr(TrustedImmPtr imm, ImplicitAddress address) | - |
517 | { | - |
518 | store32(TrustedImm32(imm), address); | - |
519 | } | - |
520 | - | |
521 | void storePtr(ImmPtr imm, Address address) | - |
522 | { | - |
523 | store32(Imm32(imm.asTrustedImmPtr()), address); | - |
524 | } | - |
525 | - | |
526 | void storePtr(TrustedImmPtr imm, void* address) | - |
527 | { | - |
528 | store32(TrustedImm32(imm), address); | - |
529 | } | - |
530 | - | |
531 | DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address) | - |
532 | { | - |
533 | return store32WithAddressOffsetPatch(src, address); | - |
534 | } | - |
535 | - | |
536 | Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right) | - |
537 | { | - |
538 | return branch32(cond, left, right); | - |
539 | } | - |
540 | - | |
541 | Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right) | - |
542 | { | - |
543 | return branch32(cond, left, TrustedImm32(right)); | - |
544 | } | - |
545 | - | |
546 | Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right) | - |
547 | { | - |
548 | return branch32(cond, left, Imm32(right.asTrustedImmPtr())); | - |
549 | } | - |
550 | - | |
551 | Jump branchPtr(RelationalCondition cond, RegisterID left, Address right) | - |
552 | { | - |
553 | return branch32(cond, left, right); | - |
554 | } | - |
555 | - | |
556 | Jump branchPtr(RelationalCondition cond, Address left, RegisterID right) | - |
557 | { | - |
558 | return branch32(cond, left, right); | - |
559 | } | - |
560 | - | |
561 | Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right) | - |
562 | { | - |
563 | return branch32(cond, left, right); | - |
564 | } | - |
565 | - | |
566 | Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right) | - |
567 | { | - |
568 | return branch32(cond, left, TrustedImm32(right)); | - |
569 | } | - |
570 | - | |
571 | Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right) | - |
572 | { | - |
573 | return branch32(cond, left, TrustedImm32(right)); | - |
574 | } | - |
575 | - | |
576 | Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest) | - |
577 | { | - |
578 | return branchSub32(cond, src, dest); | - |
579 | } | - |
580 | - | |
581 | Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask) | - |
582 | { | - |
583 | return branchTest32(cond, reg, mask); | - |
584 | } | - |
585 | - | |
586 | Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) | - |
587 | { | - |
588 | return branchTest32(cond, reg, mask); | - |
589 | } | - |
590 | - | |
591 | Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) | - |
592 | { | - |
593 | return branchTest32(cond, address, mask); | - |
594 | } | - |
595 | - | |
596 | Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) | - |
597 | { | - |
598 | return branchTest32(cond, address, mask); | - |
599 | } | - |
600 | - | |
601 | Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest) | - |
602 | { | - |
603 | return branchAdd32(cond, src, dest); | - |
604 | } | - |
605 | - | |
606 | Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest) | - |
607 | { | - |
608 | return branchSub32(cond, imm, dest); | - |
609 | } | - |
610 | using MacroAssemblerBase::branchTest8; | - |
611 | Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1)) | - |
612 | { | - |
613 | return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask); | - |
614 | } | - |
615 | #else | - |
616 | void addPtr(RegisterID src, RegisterID dest) | - |
617 | { | - |
618 | add64(src, dest); | - |
619 | } never executed: end of block | 0 |
620 | - | |
621 | void addPtr(Address src, RegisterID dest) | - |
622 | { | - |
623 | add64(src, dest); | - |
624 | } never executed: end of block | 0 |
625 | - | |
626 | void addPtr(TrustedImm32 imm, RegisterID srcDest) | - |
627 | { | - |
628 | add64(imm, srcDest); | - |
629 | } executed 15350 times by 65 tests: end of block Executed by:
| 15350 |
630 | - | |
631 | void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest) | - |
632 | { | - |
633 | add64(imm, src, dest); | - |
634 | } executed 51526 times by 62 tests: end of block Executed by:
| 51526 |
635 | - | |
636 | void addPtr(TrustedImm32 imm, Address address) | - |
637 | { | - |
638 | add64(imm, address); | - |
639 | } never executed: end of block | 0 |
640 | - | |
641 | void addPtr(AbsoluteAddress src, RegisterID dest) | - |
642 | { | - |
643 | add64(src, dest); | - |
644 | } never executed: end of block | 0 |
645 | - | |
646 | void addPtr(TrustedImmPtr imm, RegisterID dest) | - |
647 | { | - |
648 | add64(TrustedImm64(imm), dest); | - |
649 | } never executed: end of block | 0 |
650 | - | |
651 | void addPtr(TrustedImm32 imm, AbsoluteAddress address) | - |
652 | { | - |
653 | add64(imm, address); | - |
654 | } never executed: end of block | 0 |
655 | - | |
656 | void andPtr(RegisterID src, RegisterID dest) | - |
657 | { | - |
658 | and64(src, dest); | - |
659 | } never executed: end of block | 0 |
660 | - | |
661 | void andPtr(TrustedImm32 imm, RegisterID srcDest) | - |
662 | { | - |
663 | and64(imm, srcDest); | - |
664 | } never executed: end of block | 0 |
665 | - | |
666 | void negPtr(RegisterID dest) | - |
667 | { | - |
668 | neg64(dest); | - |
669 | } never executed: end of block | 0 |
670 | - | |
671 | void orPtr(RegisterID src, RegisterID dest) | - |
672 | { | - |
673 | or64(src, dest); | - |
674 | } never executed: end of block | 0 |
675 | - | |
676 | void orPtr(TrustedImm32 imm, RegisterID dest) | - |
677 | { | - |
678 | or64(imm, dest); | - |
679 | } never executed: end of block | 0 |
680 | - | |
681 | void orPtr(TrustedImmPtr imm, RegisterID dest) | - |
682 | { | - |
683 | or64(TrustedImm64(imm), dest); | - |
684 | } never executed: end of block | 0 |
685 | - | |
686 | void orPtr(RegisterID op1, RegisterID op2, RegisterID dest) | - |
687 | { | - |
688 | or64(op1, op2, dest); | - |
689 | } never executed: end of block | 0 |
690 | - | |
691 | void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest) | - |
692 | { | - |
693 | or64(imm, src, dest); | - |
694 | } never executed: end of block | 0 |
695 | - | |
696 | void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst) | - |
697 | { | - |
698 | rotateRight64(imm, srcDst); | - |
699 | } never executed: end of block | 0 |
700 | - | |
701 | void subPtr(RegisterID src, RegisterID dest) | - |
702 | { | - |
703 | sub64(src, dest); | - |
704 | } never executed: end of block | 0 |
705 | - | |
706 | void subPtr(TrustedImm32 imm, RegisterID dest) | - |
707 | { | - |
708 | sub64(imm, dest); | - |
709 | } executed 1229 times by 16 tests: end of block Executed by:
| 1229 |
710 | - | |
711 | void subPtr(TrustedImmPtr imm, RegisterID dest) | - |
712 | { | - |
713 | sub64(TrustedImm64(imm), dest); | - |
714 | } never executed: end of block | 0 |
715 | - | |
716 | void xorPtr(RegisterID src, RegisterID dest) | - |
717 | { | - |
718 | xor64(src, dest); | - |
719 | } never executed: end of block | 0 |
720 | - | |
721 | void xorPtr(RegisterID src, Address dest) | - |
722 | { | - |
723 | xor64(src, dest); | - |
724 | } never executed: end of block | 0 |
725 | - | |
726 | void xorPtr(TrustedImm32 imm, RegisterID srcDest) | - |
727 | { | - |
728 | xor64(imm, srcDest); | - |
729 | } never executed: end of block | 0 |
730 | - | |
731 | void loadPtr(ImplicitAddress address, RegisterID dest) | - |
732 | { | - |
733 | load64(address, dest); | - |
734 | } executed 41870 times by 65 tests: end of block Executed by:
| 41870 |
735 | - | |
736 | void loadPtr(BaseIndex address, RegisterID dest) | - |
737 | { | - |
738 | load64(address, dest); | - |
739 | } never executed: end of block | 0 |
740 | - | |
741 | void loadPtr(const void* address, RegisterID dest) | - |
742 | { | - |
743 | load64(address, dest); | - |
744 | } never executed: end of block | 0 |
745 | - | |
746 | DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest) | - |
747 | { | - |
748 | return load64WithAddressOffsetPatch(address, dest); never executed: return load64WithAddressOffsetPatch(address, dest); | 0 |
749 | } | - |
750 | - | |
751 | DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest) | - |
752 | { | - |
753 | return load64WithCompactAddressOffsetPatch(address, dest); never executed: return load64WithCompactAddressOffsetPatch(address, dest); | 0 |
754 | } | - |
755 | - | |
756 | void storePtr(RegisterID src, ImplicitAddress address) | - |
757 | { | - |
758 | store64(src, address); | - |
759 | } executed 2414 times by 8 tests: end of block Executed by:
| 2414 |
760 | - | |
761 | void storePtr(RegisterID src, BaseIndex address) | - |
762 | { | - |
763 | store64(src, address); | - |
764 | } never executed: end of block | 0 |
765 | - | |
766 | void storePtr(RegisterID src, void* address) | - |
767 | { | - |
768 | store64(src, address); | - |
769 | } never executed: end of block | 0 |
770 | - | |
771 | void storePtr(TrustedImmPtr imm, ImplicitAddress address) | - |
772 | { | - |
773 | store64(TrustedImm64(imm), address); | - |
774 | } executed 58 times by 7 tests: end of block Executed by:
| 58 |
775 | - | |
776 | void storePtr(TrustedImmPtr imm, BaseIndex address) | - |
777 | { | - |
778 | store64(TrustedImm64(imm), address); | - |
779 | } never executed: end of block | 0 |
780 | - | |
781 | DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address) | - |
782 | { | - |
783 | return store64WithAddressOffsetPatch(src, address); never executed: return store64WithAddressOffsetPatch(src, address); | 0 |
784 | } | - |
785 | - | |
786 | void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) | - |
787 | { | - |
788 | compare64(cond, left, right, dest); | - |
789 | } never executed: end of block | 0 |
790 | - | |
791 | void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) | - |
792 | { | - |
793 | compare64(cond, left, right, dest); | - |
794 | } never executed: end of block | 0 |
795 | - | |
796 | void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest) | - |
797 | { | - |
798 | test64(cond, reg, mask, dest); | - |
799 | } never executed: end of block | 0 |
800 | - | |
801 | void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest) | - |
802 | { | - |
803 | test64(cond, reg, mask, dest); | - |
804 | } never executed: end of block | 0 |
805 | - | |
806 | Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right) | - |
807 | { | - |
808 | return branch64(cond, left, right); never executed: return branch64(cond, left, right); | 0 |
809 | } | - |
810 | - | |
811 | Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right) | - |
812 | { | - |
813 | return this->branch64(cond, left, TrustedImm64(right)); executed 13188 times by 64 tests: return this->branch64(cond, left, TrustedImm64(right)); Executed by:
| 13188 |
814 | } | - |
815 | - | |
816 | Jump branchPtr(RelationalCondition cond, RegisterID left, Address right) | - |
817 | { | - |
818 | return branch64(cond, left, right); never executed: return branch64(cond, left, right); | 0 |
819 | } | - |
820 | - | |
821 | Jump branchPtr(RelationalCondition cond, Address left, RegisterID right) | - |
822 | { | - |
823 | return branch64(cond, left, right); never executed: return branch64(cond, left, right); | 0 |
824 | } | - |
825 | - | |
826 | Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right) | - |
827 | { | - |
828 | return branch64(cond, left, right); never executed: return branch64(cond, left, right); | 0 |
829 | } | - |
830 | - | |
831 | Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right) | - |
832 | { | - |
833 | return branch64(cond, left, TrustedImm64(right)); never executed: return branch64(cond, left, TrustedImm64(right)); | 0 |
834 | } | - |
835 | - | |
836 | Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask) | - |
837 | { | - |
838 | return branchTest64(cond, reg, mask); never executed: return branchTest64(cond, reg, mask); | 0 |
839 | } | - |
840 | - | |
841 | Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) | - |
842 | { | - |
843 | return branchTest64(cond, reg, mask); never executed: return branchTest64(cond, reg, mask); | 0 |
844 | } | - |
845 | - | |
846 | Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) | - |
847 | { | - |
848 | return branchTest64(cond, address, mask); never executed: return branchTest64(cond, address, mask); | 0 |
849 | } | - |
850 | - | |
851 | Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg) | - |
852 | { | - |
853 | return branchTest64(cond, address, reg); never executed: return branchTest64(cond, address, reg); | 0 |
854 | } | - |
855 | - | |
856 | Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) | - |
857 | { | - |
858 | return branchTest64(cond, address, mask); never executed: return branchTest64(cond, address, mask); | 0 |
859 | } | - |
860 | - | |
861 | Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) | - |
862 | { | - |
863 | return branchTest64(cond, address, mask); never executed: return branchTest64(cond, address, mask); | 0 |
864 | } | - |
865 | - | |
866 | Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest) | - |
867 | { | - |
868 | return branchAdd64(cond, imm, dest); never executed: return branchAdd64(cond, imm, dest); | 0 |
869 | } | - |
870 | - | |
871 | Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest) | - |
872 | { | - |
873 | return branchAdd64(cond, src, dest); never executed: return branchAdd64(cond, src, dest); | 0 |
874 | } | - |
875 | - | |
876 | Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest) | - |
877 | { | - |
878 | return branchSub64(cond, imm, dest); never executed: return branchSub64(cond, imm, dest); | 0 |
879 | } | - |
880 | - | |
881 | Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest) | - |
882 | { | - |
883 | return branchSub64(cond, src, dest); never executed: return branchSub64(cond, src, dest); | 0 |
884 | } | - |
885 | - | |
886 | Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest) | - |
887 | { | - |
888 | return branchSub64(cond, src1, src2, dest); never executed: return branchSub64(cond, src1, src2, dest); | 0 |
889 | } | - |
890 | #endif // !defined(V4_BOOTSTRAP) | - |
891 | - | |
892 | #if ENABLE(JIT_CONSTANT_BLINDING) | - |
893 | using MacroAssemblerBase::and64; | - |
894 | using MacroAssemblerBase::convertInt32ToDouble; | - |
895 | using MacroAssemblerBase::store64; | - |
896 | bool shouldBlindDouble(double value) | - |
897 | { | - |
898 | // Don't trust NaN or +/-Infinity | - |
899 | if (!std::isfinite(value)) | - |
900 | return shouldConsiderBlinding(); | - |
901 | - | |
902 | // Try to force normalisation, and check that there's no change | - |
903 | // in the bit pattern | - |
904 | if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value)) | - |
905 | return shouldConsiderBlinding(); | - |
906 | - | |
907 | value = abs(value); | - |
908 | // Only allow a limited set of fractional components | - |
909 | double scaledValue = value * 8; | - |
910 | if (scaledValue / 8 != value) | - |
911 | return shouldConsiderBlinding(); | - |
912 | double frac = scaledValue - floor(scaledValue); | - |
913 | if (frac != 0.0) | - |
914 | return shouldConsiderBlinding(); | - |
915 | - | |
916 | return value > 0xff; | - |
917 | } | - |
918 | - | |
919 | bool shouldBlind(ImmPtr imm) | - |
920 | { | - |
921 | #if ENABLE(FORCED_JIT_BLINDING) | - |
922 | UNUSED_PARAM(imm); | - |
923 | // Debug always blind all constants, if only so we know | - |
924 | // if we've broken blinding during patch development. | - |
925 | return true; | - |
926 | #endif | - |
927 | - | |
928 | // First off we'll special case common, "safe" values to avoid hurting | - |
929 | // performance too much | - |
930 | uintptr_t value = imm.asTrustedImmPtr().asIntptr(); | - |
931 | switch (value) { | - |
932 | case 0xffff: | - |
933 | case 0xffffff: | - |
934 | case 0xffffffffL: | - |
935 | case 0xffffffffffL: | - |
936 | case 0xffffffffffffL: | - |
937 | case 0xffffffffffffffL: | - |
938 | case 0xffffffffffffffffL: | - |
939 | return false; | - |
940 | default: { | - |
941 | if (value <= 0xff) | - |
942 | return false; | - |
943 | if (~value <= 0xff) | - |
944 | return false; | - |
945 | } | - |
946 | } | - |
947 | - | |
948 | if (!shouldConsiderBlinding()) | - |
949 | return false; | - |
950 | - | |
951 | return shouldBlindForSpecificArch(value); | - |
952 | } | - |
953 | - | |
954 | struct RotatedImmPtr { | - |
955 | RotatedImmPtr(uintptr_t v1, uint8_t v2) | - |
956 | : value(v1) | - |
957 | , rotation(v2) | - |
958 | { | - |
959 | } | - |
960 | TrustedImmPtr value; | - |
961 | TrustedImm32 rotation; | - |
962 | }; | - |
963 | - | |
964 | RotatedImmPtr rotationBlindConstant(ImmPtr imm) | - |
965 | { | - |
966 | uint8_t rotation = random() % (sizeof(void*) * 8); | - |
967 | uintptr_t value = imm.asTrustedImmPtr().asIntptr(); | - |
968 | value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation)); | - |
969 | return RotatedImmPtr(value, rotation); | - |
970 | } | - |
971 | - | |
972 | void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest) | - |
973 | { | - |
974 | move(constant.value, dest); | - |
975 | rotateRightPtr(constant.rotation, dest); | - |
976 | } | - |
977 | - | |
978 | bool shouldBlind(Imm64 imm) | - |
979 | { | - |
980 | #if ENABLE(FORCED_JIT_BLINDING) | - |
981 | UNUSED_PARAM(imm); | - |
982 | // Debug always blind all constants, if only so we know | - |
983 | // if we've broken blinding during patch development. | - |
984 | return true; | - |
985 | #endif | - |
986 | - | |
987 | // First off we'll special case common, "safe" values to avoid hurting | - |
988 | // performance too much | - |
989 | uint64_t value = imm.asTrustedImm64().m_value; | - |
990 | switch (value) { | - |
991 | case 0xffff: | - |
992 | case 0xffffff: | - |
993 | case 0xffffffffL: | - |
994 | case 0xffffffffffL: | - |
995 | case 0xffffffffffffL: | - |
996 | case 0xffffffffffffffL: | - |
997 | case 0xffffffffffffffffL: | - |
998 | return false; | - |
999 | default: { | - |
1000 | if (value <= 0xff) | - |
1001 | return false; | - |
1002 | if (~value <= 0xff) | - |
1003 | return false; | - |
1004 | - | |
1005 | JSValue jsValue = JSValue::decode(value); | - |
1006 | if (jsValue.isInt32()) | - |
1007 | return shouldBlind(Imm32(jsValue.asInt32())); | - |
1008 | if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble())) | - |
1009 | return false; | - |
1010 | - | |
1011 | if (!shouldBlindDouble(bitwise_cast<double>(value))) | - |
1012 | return false; | - |
1013 | } | - |
1014 | } | - |
1015 | - | |
1016 | if (!shouldConsiderBlinding()) | - |
1017 | return false; | - |
1018 | - | |
1019 | return shouldBlindForSpecificArch(value); | - |
1020 | } | - |
1021 | - | |
1022 | struct RotatedImm64 { | - |
1023 | RotatedImm64(uint64_t v1, uint8_t v2) | - |
1024 | : value(v1) | - |
1025 | , rotation(v2) | - |
1026 | { | - |
1027 | } | - |
1028 | TrustedImm64 value; | - |
1029 | TrustedImm32 rotation; | - |
1030 | }; | - |
1031 | - | |
1032 | RotatedImm64 rotationBlindConstant(Imm64 imm) | - |
1033 | { | - |
1034 | uint8_t rotation = random() % (sizeof(int64_t) * 8); | - |
1035 | uint64_t value = imm.asTrustedImm64().m_value; | - |
1036 | value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation)); | - |
1037 | return RotatedImm64(value, rotation); | - |
1038 | } | - |
1039 | - | |
1040 | void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest) | - |
1041 | { | - |
1042 | move(constant.value, dest); | - |
1043 | rotateRight64(constant.rotation, dest); | - |
1044 | } | - |
1045 | - | |
1046 | void convertInt32ToDouble(Imm32 imm, FPRegisterID dest) | - |
1047 | { | - |
1048 | if (shouldBlind(imm)) { | - |
1049 | RegisterID scratchRegister = scratchRegisterForBlinding(); | - |
1050 | loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister); | - |
1051 | convertInt32ToDouble(scratchRegister, dest); | - |
1052 | } else | - |
1053 | convertInt32ToDouble(imm.asTrustedImm32(), dest); | - |
1054 | } | - |
1055 | - | |
1056 | void move(ImmPtr imm, RegisterID dest) | - |
1057 | { | - |
1058 | if (shouldBlind(imm)) | - |
1059 | loadRotationBlindedConstant(rotationBlindConstant(imm), dest); | - |
1060 | else | - |
1061 | move(imm.asTrustedImmPtr(), dest); | - |
1062 | } | - |
1063 | - | |
1064 | void move(Imm64 imm, RegisterID dest) | - |
1065 | { | - |
1066 | if (shouldBlind(imm)) | - |
1067 | loadRotationBlindedConstant(rotationBlindConstant(imm), dest); | - |
1068 | else | - |
1069 | move(imm.asTrustedImm64(), dest); | - |
1070 | } | - |
1071 | - | |
1072 | void and64(Imm32 imm, RegisterID dest) | - |
1073 | { | - |
1074 | if (shouldBlind(imm)) { | - |
1075 | BlindedImm32 key = andBlindedConstant(imm); | - |
1076 | and64(key.value1, dest); | - |
1077 | and64(key.value2, dest); | - |
1078 | } else | - |
1079 | and64(imm.asTrustedImm32(), dest); | - |
1080 | } | - |
1081 | - | |
1082 | Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right) | - |
1083 | { | - |
1084 | if (shouldBlind(right)) { | - |
1085 | RegisterID scratchRegister = scratchRegisterForBlinding(); | - |
1086 | loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister); | - |
1087 | return branchPtr(cond, left, scratchRegister); | - |
1088 | } | - |
1089 | return branchPtr(cond, left, right.asTrustedImmPtr()); | - |
1090 | } | - |
1091 | - | |
1092 | void storePtr(ImmPtr imm, Address dest) | - |
1093 | { | - |
1094 | if (shouldBlind(imm)) { | - |
1095 | RegisterID scratchRegister = scratchRegisterForBlinding(); | - |
1096 | loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister); | - |
1097 | storePtr(scratchRegister, dest); | - |
1098 | } else | - |
1099 | storePtr(imm.asTrustedImmPtr(), dest); | - |
1100 | } | - |
1101 | - | |
1102 | void store64(Imm64 imm, Address dest) | - |
1103 | { | - |
1104 | if (shouldBlind(imm)) { | - |
1105 | RegisterID scratchRegister = scratchRegisterForBlinding(); | - |
1106 | loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister); | - |
1107 | store64(scratchRegister, dest); | - |
1108 | } else | - |
1109 | store64(imm.asTrustedImm64(), dest); | - |
1110 | } | - |
1111 | - | |
1112 | #endif | - |
1113 | - | |
1114 | #endif // !CPU(X86_64) | - |
1115 | - | |
1116 | #if ENABLE(JIT_CONSTANT_BLINDING) | - |
1117 | bool shouldBlind(Imm32 imm) | - |
1118 | { | - |
1119 | #if ENABLE(FORCED_JIT_BLINDING) | - |
1120 | UNUSED_PARAM(imm); | - |
1121 | // Debug always blind all constants, if only so we know | - |
1122 | // if we've broken blinding during patch development. | - |
1123 | return true; | - |
1124 | #else | - |
1125 | - | |
1126 | // First off we'll special case common, "safe" values to avoid hurting | - |
1127 | // performance too much | - |
1128 | uint32_t value = imm.asTrustedImm32().m_value; | - |
1129 | switch (value) { | - |
1130 | case 0xffff: | - |
1131 | case 0xffffff: | - |
1132 | case 0xffffffff: | - |
1133 | return false; | - |
1134 | default: | - |
1135 | if (value <= 0xff) | - |
1136 | return false; | - |
1137 | if (~value <= 0xff) | - |
1138 | return false; | - |
1139 | } | - |
1140 | - | |
1141 | if (!shouldConsiderBlinding()) | - |
1142 | return false; | - |
1143 | - | |
1144 | return shouldBlindForSpecificArch(value); | - |
1145 | #endif | - |
1146 | } | - |
1147 | - | |
1148 | struct BlindedImm32 { | - |
1149 | BlindedImm32(int32_t v1, int32_t v2) | - |
1150 | : value1(v1) | - |
1151 | , value2(v2) | - |
1152 | { | - |
1153 | } | - |
1154 | TrustedImm32 value1; | - |
1155 | TrustedImm32 value2; | - |
1156 | }; | - |
1157 | - | |
1158 | uint32_t keyForConstant(uint32_t value, uint32_t& mask) | - |
1159 | { | - |
1160 | uint32_t key = random(); | - |
1161 | if (value <= 0xff) | - |
1162 | mask = 0xff; | - |
1163 | else if (value <= 0xffff) | - |
1164 | mask = 0xffff; | - |
1165 | else if (value <= 0xffffff) | - |
1166 | mask = 0xffffff; | - |
1167 | else | - |
1168 | mask = 0xffffffff; | - |
1169 | return key & mask; | - |
1170 | } | - |
1171 | - | |
1172 | uint32_t keyForConstant(uint32_t value) | - |
1173 | { | - |
1174 | uint32_t mask = 0; | - |
1175 | return keyForConstant(value, mask); | - |
1176 | } | - |
1177 | - | |
1178 | BlindedImm32 xorBlindConstant(Imm32 imm) | - |
1179 | { | - |
1180 | uint32_t baseValue = imm.asTrustedImm32().m_value; | - |
1181 | uint32_t key = keyForConstant(baseValue); | - |
1182 | return BlindedImm32(baseValue ^ key, key); | - |
1183 | } | - |
1184 | - | |
1185 | BlindedImm32 additionBlindedConstant(Imm32 imm) | - |
1186 | { | - |
1187 | // The addition immediate may be used as a pointer offset. Keep aligned based on "imm". | - |
1188 | static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff }; | - |
1189 | - | |
1190 | uint32_t baseValue = imm.asTrustedImm32().m_value; | - |
1191 | uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3]; | - |
1192 | if (key > baseValue) | - |
1193 | key = key - baseValue; | - |
1194 | return BlindedImm32(baseValue - key, key); | - |
1195 | } | - |
1196 | - | |
1197 | BlindedImm32 andBlindedConstant(Imm32 imm) | - |
1198 | { | - |
1199 | uint32_t baseValue = imm.asTrustedImm32().m_value; | - |
1200 | uint32_t mask = 0; | - |
1201 | uint32_t key = keyForConstant(baseValue, mask); | - |
1202 | ASSERT((baseValue & mask) == baseValue); | - |
1203 | return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask); | - |
1204 | } | - |
1205 | - | |
1206 | BlindedImm32 orBlindedConstant(Imm32 imm) | - |
1207 | { | - |
1208 | uint32_t baseValue = imm.asTrustedImm32().m_value; | - |
1209 | uint32_t mask = 0; | - |
1210 | uint32_t key = keyForConstant(baseValue, mask); | - |
1211 | ASSERT((baseValue & mask) == baseValue); | - |
1212 | return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask); | - |
1213 | } | - |
1214 | - | |
1215 | void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest) | - |
1216 | { | - |
1217 | move(constant.value1, dest); | - |
1218 | xor32(constant.value2, dest); | - |
1219 | } | - |
1220 | - | |
1221 | void add32(Imm32 imm, RegisterID dest) | - |
1222 | { | - |
1223 | if (shouldBlind(imm)) { | - |
1224 | BlindedImm32 key = additionBlindedConstant(imm); | - |
1225 | add32(key.value1, dest); | - |
1226 | add32(key.value2, dest); | - |
1227 | } else | - |
1228 | add32(imm.asTrustedImm32(), dest); | - |
1229 | } | - |
1230 | - | |
1231 | void addPtr(Imm32 imm, RegisterID dest) | - |
1232 | { | - |
1233 | if (shouldBlind(imm)) { | - |
1234 | BlindedImm32 key = additionBlindedConstant(imm); | - |
1235 | addPtr(key.value1, dest); | - |
1236 | addPtr(key.value2, dest); | - |
1237 | } else | - |
1238 | addPtr(imm.asTrustedImm32(), dest); | - |
1239 | } | - |
1240 | - | |
1241 | void and32(Imm32 imm, RegisterID dest) | - |
1242 | { | - |
1243 | if (shouldBlind(imm)) { | - |
1244 | BlindedImm32 key = andBlindedConstant(imm); | - |
1245 | and32(key.value1, dest); | - |
1246 | and32(key.value2, dest); | - |
1247 | } else | - |
1248 | and32(imm.asTrustedImm32(), dest); | - |
1249 | } | - |
1250 | - | |
1251 | void andPtr(Imm32 imm, RegisterID dest) | - |
1252 | { | - |
1253 | if (shouldBlind(imm)) { | - |
1254 | BlindedImm32 key = andBlindedConstant(imm); | - |
1255 | andPtr(key.value1, dest); | - |
1256 | andPtr(key.value2, dest); | - |
1257 | } else | - |
1258 | andPtr(imm.asTrustedImm32(), dest); | - |
1259 | } | - |
1260 | - | |
1261 | void and32(Imm32 imm, RegisterID src, RegisterID dest) | - |
1262 | { | - |
1263 | if (shouldBlind(imm)) { | - |
1264 | if (src == dest) | - |
1265 | return and32(imm.asTrustedImm32(), dest); | - |
1266 | loadXorBlindedConstant(xorBlindConstant(imm), dest); | - |
1267 | and32(src, dest); | - |
1268 | } else | - |
1269 | and32(imm.asTrustedImm32(), src, dest); | - |
1270 | } | - |
1271 | - | |
1272 | void move(Imm32 imm, RegisterID dest) | - |
1273 | { | - |
1274 | if (shouldBlind(imm)) | - |
1275 | loadXorBlindedConstant(xorBlindConstant(imm), dest); | - |
1276 | else | - |
1277 | move(imm.asTrustedImm32(), dest); | - |
1278 | } | - |
1279 | - | |
1280 | void or32(Imm32 imm, RegisterID src, RegisterID dest) | - |
1281 | { | - |
1282 | if (shouldBlind(imm)) { | - |
1283 | if (src == dest) | - |
1284 | return or32(imm, dest); | - |
1285 | loadXorBlindedConstant(xorBlindConstant(imm), dest); | - |
1286 | or32(src, dest); | - |
1287 | } else | - |
1288 | or32(imm.asTrustedImm32(), src, dest); | - |
1289 | } | - |
1290 | - | |
1291 | void or32(Imm32 imm, RegisterID dest) | - |
1292 | { | - |
1293 | if (shouldBlind(imm)) { | - |
1294 | BlindedImm32 key = orBlindedConstant(imm); | - |
1295 | or32(key.value1, dest); | - |
1296 | or32(key.value2, dest); | - |
1297 | } else | - |
1298 | or32(imm.asTrustedImm32(), dest); | - |
1299 | } | - |
1300 | - | |
1301 | void poke(Imm32 value, int index = 0) | - |
1302 | { | - |
1303 | store32(value, addressForPoke(index)); | - |
1304 | } | - |
1305 | - | |
1306 | void poke(ImmPtr value, int index = 0) | - |
1307 | { | - |
1308 | storePtr(value, addressForPoke(index)); | - |
1309 | } | - |
1310 | - | |
1311 | #if CPU(X86_64) | - |
1312 | void poke(Imm64 value, int index = 0) | - |
1313 | { | - |
1314 | store64(value, addressForPoke(index)); | - |
1315 | } | - |
1316 | #endif | - |
1317 | - | |
1318 | void store32(Imm32 imm, Address dest) | - |
1319 | { | - |
1320 | if (shouldBlind(imm)) { | - |
1321 | #if CPU(X86) || CPU(X86_64) | - |
1322 | BlindedImm32 blind = xorBlindConstant(imm); | - |
1323 | store32(blind.value1, dest); | - |
1324 | xor32(blind.value2, dest); | - |
1325 | #else | - |
1326 | if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) { | - |
1327 | loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister); | - |
1328 | store32(scratchRegister, dest); | - |
1329 | } else { | - |
1330 | // If we don't have a scratch register available for use, we'll just | - |
1331 | // place a random number of nops. | - |
1332 | uint32_t nopCount = random() & 3; | - |
1333 | while (nopCount--) | - |
1334 | nop(); | - |
1335 | store32(imm.asTrustedImm32(), dest); | - |
1336 | } | - |
1337 | #endif | - |
1338 | } else | - |
1339 | store32(imm.asTrustedImm32(), dest); | - |
1340 | } | - |
1341 | - | |
1342 | void sub32(Imm32 imm, RegisterID dest) | - |
1343 | { | - |
1344 | if (shouldBlind(imm)) { | - |
1345 | BlindedImm32 key = additionBlindedConstant(imm); | - |
1346 | sub32(key.value1, dest); | - |
1347 | sub32(key.value2, dest); | - |
1348 | } else | - |
1349 | sub32(imm.asTrustedImm32(), dest); | - |
1350 | } | - |
1351 | - | |
1352 | void subPtr(Imm32 imm, RegisterID dest) | - |
1353 | { | - |
1354 | if (shouldBlind(imm)) { | - |
1355 | BlindedImm32 key = additionBlindedConstant(imm); | - |
1356 | subPtr(key.value1, dest); | - |
1357 | subPtr(key.value2, dest); | - |
1358 | } else | - |
1359 | subPtr(imm.asTrustedImm32(), dest); | - |
1360 | } | - |
1361 | - | |
1362 | void xor32(Imm32 imm, RegisterID src, RegisterID dest) | - |
1363 | { | - |
1364 | if (shouldBlind(imm)) { | - |
1365 | BlindedImm32 blind = xorBlindConstant(imm); | - |
1366 | xor32(blind.value1, src, dest); | - |
1367 | xor32(blind.value2, dest); | - |
1368 | } else | - |
1369 | xor32(imm.asTrustedImm32(), src, dest); | - |
1370 | } | - |
1371 | - | |
1372 | void xor32(Imm32 imm, RegisterID dest) | - |
1373 | { | - |
1374 | if (shouldBlind(imm)) { | - |
1375 | BlindedImm32 blind = xorBlindConstant(imm); | - |
1376 | xor32(blind.value1, dest); | - |
1377 | xor32(blind.value2, dest); | - |
1378 | } else | - |
1379 | xor32(imm.asTrustedImm32(), dest); | - |
1380 | } | - |
1381 | - | |
1382 | Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right) | - |
1383 | { | - |
1384 | if (shouldBlind(right)) { | - |
1385 | if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) { | - |
1386 | loadXorBlindedConstant(xorBlindConstant(right), scratchRegister); | - |
1387 | return branch32(cond, left, scratchRegister); | - |
1388 | } | - |
1389 | // If we don't have a scratch register available for use, we'll just | - |
1390 | // place a random number of nops. | - |
1391 | uint32_t nopCount = random() & 3; | - |
1392 | while (nopCount--) | - |
1393 | nop(); | - |
1394 | return branch32(cond, left, right.asTrustedImm32()); | - |
1395 | } | - |
1396 | - | |
1397 | return branch32(cond, left, right.asTrustedImm32()); | - |
1398 | } | - |
1399 | - | |
1400 | Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest) | - |
1401 | { | - |
1402 | if (src == dest) | - |
1403 | ASSERT(scratchRegisterForBlinding()); | - |
1404 | - | |
1405 | if (shouldBlind(imm)) { | - |
1406 | if (src == dest) { | - |
1407 | if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) { | - |
1408 | move(src, scratchRegister); | - |
1409 | src = scratchRegister; | - |
1410 | } | - |
1411 | } | - |
1412 | loadXorBlindedConstant(xorBlindConstant(imm), dest); | - |
1413 | return branchAdd32(cond, src, dest); | - |
1414 | } | - |
1415 | return branchAdd32(cond, src, imm.asTrustedImm32(), dest); | - |
1416 | } | - |
1417 | - | |
1418 | Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest) | - |
1419 | { | - |
1420 | if (src == dest) | - |
1421 | ASSERT(scratchRegisterForBlinding()); | - |
1422 | - | |
1423 | if (shouldBlind(imm)) { | - |
1424 | if (src == dest) { | - |
1425 | if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) { | - |
1426 | move(src, scratchRegister); | - |
1427 | src = scratchRegister; | - |
1428 | } | - |
1429 | } | - |
1430 | loadXorBlindedConstant(xorBlindConstant(imm), dest); | - |
1431 | return branchMul32(cond, src, dest); | - |
1432 | } | - |
1433 | return branchMul32(cond, imm.asTrustedImm32(), src, dest); | - |
1434 | } | - |
1435 | - | |
1436 | // branchSub32 takes a scratch register as 32 bit platforms make use of this, | - |
1437 | // with src == dst, and on x86-32 we don't have a platform scratch register. | - |
1438 | Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch) | - |
1439 | { | - |
1440 | if (shouldBlind(imm)) { | - |
1441 | ASSERT(scratch != dest); | - |
1442 | ASSERT(scratch != src); | - |
1443 | loadXorBlindedConstant(xorBlindConstant(imm), scratch); | - |
1444 | return branchSub32(cond, src, scratch, dest); | - |
1445 | } | - |
1446 | return branchSub32(cond, src, imm.asTrustedImm32(), dest); | - |
1447 | } | - |
1448 | - | |
1449 | // Immediate shifts only have 5 controllable bits | - |
1450 | // so we'll consider them safe for now. | - |
1451 | TrustedImm32 trustedImm32ForShift(Imm32 imm) | - |
1452 | { | - |
1453 | return TrustedImm32(imm.asTrustedImm32().m_value & 31); | - |
1454 | } | - |
1455 | - | |
1456 | void lshift32(Imm32 imm, RegisterID dest) | - |
1457 | { | - |
1458 | lshift32(trustedImm32ForShift(imm), dest); | - |
1459 | } | - |
1460 | - | |
1461 | void lshift32(RegisterID src, Imm32 amount, RegisterID dest) | - |
1462 | { | - |
1463 | lshift32(src, trustedImm32ForShift(amount), dest); | - |
1464 | } | - |
1465 | - | |
1466 | void rshift32(Imm32 imm, RegisterID dest) | - |
1467 | { | - |
1468 | rshift32(trustedImm32ForShift(imm), dest); | - |
1469 | } | - |
1470 | - | |
1471 | void rshift32(RegisterID src, Imm32 amount, RegisterID dest) | - |
1472 | { | - |
1473 | rshift32(src, trustedImm32ForShift(amount), dest); | - |
1474 | } | - |
1475 | - | |
1476 | void urshift32(Imm32 imm, RegisterID dest) | - |
1477 | { | - |
1478 | urshift32(trustedImm32ForShift(imm), dest); | - |
1479 | } | - |
1480 | - | |
1481 | void urshift32(RegisterID src, Imm32 amount, RegisterID dest) | - |
1482 | { | - |
1483 | urshift32(src, trustedImm32ForShift(amount), dest); | - |
1484 | } | - |
1485 | #endif | - |
1486 | }; | - |
1487 | - | |
1488 | #if CPU(ARM_THUMB2) | - |
1489 | typedef MacroAssembler<MacroAssemblerARMv7> DefaultMacroAssembler; | - |
1490 | #elif CPU(ARM64) | - |
1491 | typedef MacroAssembler<MacroAssemblerARM64> DefaultMacroAssembler; | - |
1492 | #elif CPU(ARM_TRADITIONAL) | - |
1493 | typedef MacroAssembler<MacroAssemblerARM> DefaultMacroAssembler; | - |
1494 | #elif CPU(MIPS) | - |
1495 | typedef MacroAssembler<MacroAssemblerMIPS> DefaultMacroAssembler; | - |
1496 | #elif CPU(X86) | - |
1497 | typedef MacroAssembler<MacroAssemblerX86> DefaultMacroAssembler; | - |
1498 | #elif CPU(X86_64) | - |
1499 | typedef MacroAssembler<MacroAssemblerX86_64> DefaultMacroAssembler; | - |
1500 | #elif CPU(SH4) | - |
1501 | typedef JSC::MacroAssemblerSH4 DefaultMacroAssembler; | - |
1502 | #endif | - |
1503 | - | |
1504 | } // namespace JSC | - |
1505 | - | |
1506 | #else // ENABLE(ASSEMBLER) | - |
1507 | - | |
1508 | // If there is no assembler for this platform, at least allow code to make references to | - |
1509 | // some of the things it would otherwise define, albeit without giving that code any way | - |
1510 | // of doing anything useful. | - |
1511 | class MacroAssembler { | - |
1512 | private: | - |
1513 | MacroAssembler() { } | - |
1514 | - | |
1515 | public: | - |
1516 | - | |
1517 | enum RegisterID { NoRegister }; | - |
1518 | enum FPRegisterID { NoFPRegister }; | - |
1519 | }; | - |
1520 | - | |
1521 | #endif // ENABLE(ASSEMBLER) | - |
1522 | - | |
1523 | #endif // MacroAssembler_h | - |
Source code | Switch to Preprocessed file |