From eb53c0f68791d34b85c273f86a932f0abb1d0933 Mon Sep 17 00:00:00 2001 From: Myles Borins Date: Sat, 18 Feb 2023 19:04:06 -0500 Subject: [PATCH] 2023-02-21, Version 19.7.0 (Current) Notable changes: deps: * upgrade npm to 9.5.0 (npm team) https://github.com/nodejs/node/pull/46673 * add ada as a dependency (Yagiz Nizipli) https://github.com/nodejs/node/pull/46410 doc: * add debadree25 to collaborators (Debadree Chatterjee) https://github.com/nodejs/node/pull/46716 * add deokjinkim to collaborators (Deokjin Kim) https://github.com/nodejs/node/pull/46444 doc,lib,src,test: * rename --test-coverage (Colin Ihrig) https://github.com/nodejs/node/pull/46017 lib: * (SEMVER-MINOR) add aborted() utility function (Debadree Chatterjee) https://github.com/nodejs/node/pull/46494 src: * (SEMVER-MINOR) add initial support for single executable applications (Darshan Sen) https://github.com/nodejs/node/pull/45038 * (SEMVER-MINOR) allow optional Isolate termination in node::Stop() (Shelley Vohr) https://github.com/nodejs/node/pull/46583 * (SEMVER-MINOR) allow blobs in addition to `FILE*`s in embedder snapshot API (Anna Henningsen) https://github.com/nodejs/node/pull/46491 * (SEMVER-MINOR) allow snapshotting from the embedder API (Anna Henningsen) https://github.com/nodejs/node/pull/45888 * (SEMVER-MINOR) make build_snapshot a per-Isolate option, rather than a global one (Anna Henningsen) https://github.com/nodejs/node/pull/45888 * (SEMVER-MINOR) add snapshot support for embedder API (Anna Henningsen) https://github.com/nodejs/node/pull/45888 * (SEMVER-MINOR) allow embedder control of code generation policy (Shelley Vohr) https://github.com/nodejs/node/pull/46368 stream: * (SEMVER-MINOR) add abort signal for ReadableStream and WritableStream (Debadree Chatterjee) https://github.com/nodejs/node/pull/46273 test_runner: * add initial code coverage support (Colin Ihrig) https://github.com/nodejs/node/pull/46017 url: * replace url-parser with ada (Yagiz Nizipli) https://github.com/nodejs/node/pull/46410 PR-URL: TODO --- CHANGELOG.md | 3 +- deps/cares/src/lib/ares_library_init.h | 43 + deps/corepack/dist/vcc.js | 424 + ...9_zip_node_modules_proxy-agent_index_js.js | 41282 ++++++++++++++++ .../@npmcli/name-from-folder/index.js | 7 + .../node_modules/buffer/AUTHORS.md | 73 + .../node_modules/buffer/LICENSE | 21 + .../node_modules/buffer/index.d.ts | 194 + .../node_modules/buffer/index.js | 2106 + .../node_modules/buffer/package.json | 93 + .../node_modules/readable-stream/LICENSE | 47 + .../readable-stream/lib/_stream_duplex.js | 3 + .../lib/_stream_passthrough.js | 3 + .../readable-stream/lib/_stream_readable.js | 3 + .../readable-stream/lib/_stream_transform.js | 3 + .../readable-stream/lib/_stream_writable.js | 3 + .../lib/internal/streams/add-abort-signal.js | 52 + .../lib/internal/streams/buffer_list.js | 180 + .../lib/internal/streams/compose.js | 161 + .../lib/internal/streams/destroy.js | 337 + .../lib/internal/streams/duplex.js | 157 + .../lib/internal/streams/duplexify.js | 425 + .../lib/internal/streams/end-of-stream.js | 262 + .../lib/internal/streams/from.js | 115 + .../lib/internal/streams/lazy_transform.js | 61 + .../lib/internal/streams/legacy.js | 100 + .../lib/internal/streams/operators.js | 534 + .../lib/internal/streams/passthrough.js | 42 + .../lib/internal/streams/pipeline.js | 434 + .../lib/internal/streams/readable.js | 1339 + .../lib/internal/streams/state.js | 33 + .../lib/internal/streams/transform.js | 196 + .../lib/internal/streams/utils.js | 328 + .../lib/internal/streams/writable.js | 893 
+ .../lib/internal/validators.js | 417 + .../readable-stream/lib/ours/browser.js | 36 + .../readable-stream/lib/ours/errors.js | 391 + .../readable-stream/lib/ours/index.js | 67 + .../readable-stream/lib/ours/primordials.js | 130 + .../readable-stream/lib/ours/util.js | 150 + .../readable-stream/lib/stream.js | 162 + .../readable-stream/lib/stream/promises.js | 43 + .../node_modules/readable-stream/package.json | 84 + deps/npm/node_modules/fs-minipass/index.js | 422 + .../fs-minipass/node_modules/minipass/LICENSE | 15 + .../node_modules/minipass/index.d.ts | 155 + .../node_modules/minipass/index.js | 649 + .../node_modules/minipass/package.json | 56 + .../npm/node_modules/just-diff-apply/index.js | 161 + deps/npm/node_modules/just-diff/index.js | 153 + deps/npm/node_modules/minimatch/lib/path.js | 4 + deps/npm/node_modules/minimatch/minimatch.js | 906 + deps/npm/node_modules/mute-stream/mute.js | 145 + .../node_modules/@npmcli/move-file/LICENSE.md | 22 + .../@npmcli/move-file/lib/index.js | 185 + .../@npmcli/move-file/package.json | 47 + .../npm-user-validate/npm-user-validate.js | 61 + .../node_modules/promzard/example/buffer.js | 12 + .../node_modules/promzard/example/index.js | 11 + .../promzard/example/npm-init/init-input.js | 191 + .../promzard/example/npm-init/init.js | 37 + .../promzard/example/npm-init/package.json | 10 + .../promzard/example/substack-input.js | 61 + deps/npm/node_modules/promzard/promzard.js | 238 + deps/npm/node_modules/promzard/test/basic.js | 91 + deps/npm/node_modules/promzard/test/buffer.js | 84 + .../node_modules/promzard/test/exports.input | 5 + .../npm/node_modules/promzard/test/exports.js | 48 + deps/npm/node_modules/promzard/test/fn.input | 18 + deps/npm/node_modules/promzard/test/fn.js | 56 + .../node_modules/promzard/test/simple.input | 8 + deps/npm/node_modules/promzard/test/simple.js | 30 + .../node_modules/promzard/test/validate.input | 8 + .../node_modules/promzard/test/validate.js | 20 + .../readable-stream/CONTRIBUTING.md | 38 + .../readable-stream/GOVERNANCE.md | 136 + .../readable-stream/errors-browser.js | 127 + .../node_modules/readable-stream/errors.js | 116 + .../readable-stream/experimentalWarning.js | 17 + .../lib/internal/streams/async_iterator.js | 207 + .../lib/internal/streams/from-browser.js | 3 + .../lib/internal/streams/stream-browser.js | 1 + .../lib/internal/streams/stream.js | 1 + .../readable-stream/readable-browser.js | 9 + .../node_modules/readable-stream/readable.js | 16 + deps/v8/.vpython | 91 + deps/v8/bazel/BUILD.zlib | 69 + deps/v8/gni/v8.cmx | 52 + .../atomicops_internals_atomicword_compat.h | 89 + deps/v8/src/base/functional.cc | 110 + .../mips/baseline-assembler-mips-inl.h | 526 + .../mips/baseline-compiler-mips-inl.h | 78 + .../riscv64/baseline-assembler-riscv64-inl.h | 540 + .../riscv64/baseline-compiler-riscv64-inl.h | 79 + .../v8/src/builtins/builtins-shadow-realms.cc | 226 + .../src/builtins/builtins-shadowrealm-gen.cc | 248 + deps/v8/src/builtins/mips/builtins-mips.cc | 4213 ++ .../src/builtins/riscv64/builtins-riscv64.cc | 3870 ++ deps/v8/src/codegen/mips/assembler-mips-inl.h | 353 + deps/v8/src/codegen/mips/assembler-mips.cc | 3853 ++ deps/v8/src/codegen/mips/assembler-mips.h | 1924 + deps/v8/src/codegen/mips/constants-mips.cc | 144 + deps/v8/src/codegen/mips/constants-mips.h | 1924 + deps/v8/src/codegen/mips/cpu-mips.cc | 45 + .../mips/interface-descriptors-mips-inl.h | 315 + .../src/codegen/mips/macro-assembler-mips.cc | 5638 +++ .../src/codegen/mips/macro-assembler-mips.h | 1202 + 
deps/v8/src/codegen/mips/register-mips.h | 299 + deps/v8/src/codegen/mips/reglist-mips.h | 48 + .../codegen/riscv64/assembler-riscv64-inl.h | 327 + .../src/codegen/riscv64/assembler-riscv64.cc | 4095 ++ .../src/codegen/riscv64/assembler-riscv64.h | 1829 + .../src/codegen/riscv64/constants-riscv64.cc | 245 + .../src/codegen/riscv64/constants-riscv64.h | 1986 + deps/v8/src/codegen/riscv64/cpu-riscv64.cc | 32 + .../interface-descriptors-riscv64-inl.h | 327 + .../riscv64/macro-assembler-riscv64.cc | 5170 ++ .../codegen/riscv64/macro-assembler-riscv64.h | 1363 + .../v8/src/codegen/riscv64/register-riscv64.h | 314 + deps/v8/src/codegen/riscv64/reglist-riscv64.h | 64 + deps/v8/src/common/allow-deprecated.h | 37 + .../backend/mips/code-generator-mips.cc | 4455 ++ .../backend/mips/instruction-codes-mips.h | 402 + .../mips/instruction-scheduler-mips.cc | 1806 + .../backend/mips/instruction-selector-mips.cc | 2566 + .../backend/riscv64/code-generator-riscv64.cc | 4404 ++ .../riscv64/instruction-codes-riscv64.h | 434 + .../riscv64/instruction-scheduler-riscv64.cc | 1561 + .../riscv64/instruction-selector-riscv64.cc | 3403 ++ deps/v8/src/debug/debug-type-profile.cc | 121 + deps/v8/src/debug/debug-type-profile.h | 47 + .../src/deoptimizer/mips/deoptimizer-mips.cc | 34 + .../riscv64/deoptimizer-riscv64.cc | 34 + deps/v8/src/diagnostics/mips/disasm-mips.cc | 2736 + deps/v8/src/diagnostics/mips/unwinder-mips.cc | 14 + .../src/diagnostics/riscv64/disasm-riscv64.cc | 2946 ++ .../diagnostics/riscv64/unwinder-riscv64.cc | 14 + .../src/diagnostics/system-jit-metadata-win.h | 246 + deps/v8/src/diagnostics/system-jit-win.cc | 134 + deps/v8/src/diagnostics/system-jit-win.h | 21 + .../execution/mips/frame-constants-mips.cc | 32 + .../src/execution/mips/frame-constants-mips.h | 84 + deps/v8/src/execution/mips/simulator-mips.cc | 7295 +++ deps/v8/src/execution/mips/simulator-mips.h | 719 + .../riscv64/frame-constants-riscv64.cc | 32 + .../riscv64/frame-constants-riscv64.h | 83 + .../execution/riscv64/simulator-riscv64.cc | 7313 +++ .../src/execution/riscv64/simulator-riscv64.h | 1077 + .../heap/base/asm/ia32/push_registers_masm.S | 48 + .../heap/base/asm/mips/push_registers_asm.cc | 48 + .../base/asm/riscv64/push_registers_asm.cc | 51 + .../heap/base/asm/x64/push_registers_masm.S | 57 + deps/v8/src/logging/log-utils.cc | 303 + deps/v8/src/logging/log-utils.h | 157 + deps/v8/src/objects/js-shadow-realms-inl.h | 28 + deps/v8/src/objects/js-shadow-realms.h | 39 + deps/v8/src/objects/js-shadow-realms.tq | 5 + .../objects/osr-optimized-code-cache-inl.h | 25 + .../src/objects/osr-optimized-code-cache.cc | 303 + .../v8/src/objects/osr-optimized-code-cache.h | 118 + .../mips/regexp-macro-assembler-mips.cc | 1359 + .../regexp/mips/regexp-macro-assembler-mips.h | 231 + deps/v8/src/regexp/property-sequences.cc | 1246 + deps/v8/src/regexp/property-sequences.h | 28 + .../riscv64/regexp-macro-assembler-riscv64.cc | 1371 + .../riscv64/regexp-macro-assembler-riscv64.h | 236 + .../baseline/mips/liftoff-assembler-mips.h | 3148 ++ .../riscv64/liftoff-assembler-riscv64.h | 3857 ++ deps/v8/src/wasm/init-expr-interface.cc | 241 + deps/v8/src/wasm/init-expr-interface.h | 96 + deps/v8/src/wasm/memory-protection-key.cc | 230 + deps/v8/src/wasm/memory-protection-key.h | 96 + deps/v8/src/wasm/signature-map.cc | 32 + deps/v8/src/wasm/signature-map.h | 54 + deps/v8/test/cctest/compiler/c-signature.h | 144 + deps/v8/test/cctest/compiler/call-tester.h | 75 + .../cctest/compiler/code-assembler-tester.h | 85 + 
.../cctest/compiler/node-observer-tester.h | 91 + .../test-run-bytecode-graph-builder.cc | 2969 ++ .../v8/test/cctest/compiler/test-run-deopt.cc | 119 + .../cctest/compiler/test-run-jsbranches.cc | 324 + .../test/cctest/compiler/test-run-jscalls.cc | 197 + .../cctest/compiler/test-run-jsexceptions.cc | 276 + .../cctest/compiler/test-run-jsobjects.cc | 57 + .../v8/test/cctest/compiler/test-run-jsops.cc | 534 + .../cctest/compiler/test-run-tail-calls.cc | 177 + .../cctest/compiler/test-sloppy-equality.cc | 142 + deps/v8/test/cctest/compiler/value-helper.cc | 21 + deps/v8/test/cctest/compiler/value-helper.h | 431 + deps/v8/test/cctest/disasm-regex-helper.cc | 292 + deps/v8/test/cctest/disasm-regex-helper.h | 318 + deps/v8/test/cctest/gay-fixed.cc | 0 doc/api/async_context.md | 2 +- doc/api/cli.md | 2 +- doc/api/single-executable-applications.md | 2 +- doc/api/stream.md | 4 +- doc/api/util.md | 2 +- doc/changelogs/CHANGELOG_V19.md | 139 + src/node_version.h | 6 +- 199 files changed, 159561 insertions(+), 10 deletions(-) create mode 100644 deps/cares/src/lib/ares_library_init.h create mode 100644 deps/corepack/dist/vcc.js create mode 100644 deps/corepack/dist/vendors-_yarn_berry_cache_proxy-agent-npm-5_0_0-41772f4b01-9_zip_node_modules_proxy-agent_index_js.js create mode 100644 deps/npm/node_modules/@npmcli/name-from-folder/index.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/buffer/AUTHORS.md create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/buffer/LICENSE create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/buffer/index.d.ts create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/buffer/index.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/buffer/package.json create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/LICENSE create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/_stream_duplex.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/_stream_passthrough.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/_stream_readable.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/_stream_transform.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/_stream_writable.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/add-abort-signal.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/buffer_list.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/compose.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/destroy.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/duplex.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/duplexify.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/end-of-stream.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/from.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/lazy_transform.js create mode 
100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/legacy.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/operators.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/passthrough.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/pipeline.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/readable.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/state.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/transform.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/utils.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/streams/writable.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/internal/validators.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/ours/browser.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/ours/errors.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/ours/index.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/ours/primordials.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/ours/util.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/stream.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/lib/stream/promises.js create mode 100644 deps/npm/node_modules/are-we-there-yet/node_modules/readable-stream/package.json create mode 100644 deps/npm/node_modules/fs-minipass/index.js create mode 100644 deps/npm/node_modules/fs-minipass/node_modules/minipass/LICENSE create mode 100644 deps/npm/node_modules/fs-minipass/node_modules/minipass/index.d.ts create mode 100644 deps/npm/node_modules/fs-minipass/node_modules/minipass/index.js create mode 100644 deps/npm/node_modules/fs-minipass/node_modules/minipass/package.json create mode 100644 deps/npm/node_modules/just-diff-apply/index.js create mode 100644 deps/npm/node_modules/just-diff/index.js create mode 100644 deps/npm/node_modules/minimatch/lib/path.js create mode 100644 deps/npm/node_modules/minimatch/minimatch.js create mode 100644 deps/npm/node_modules/mute-stream/mute.js create mode 100644 deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/LICENSE.md create mode 100644 deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/lib/index.js create mode 100644 deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/package.json create mode 100644 deps/npm/node_modules/npm-user-validate/npm-user-validate.js create mode 100644 deps/npm/node_modules/promzard/example/buffer.js create mode 100644 deps/npm/node_modules/promzard/example/index.js create mode 100644 deps/npm/node_modules/promzard/example/npm-init/init-input.js create mode 100644 deps/npm/node_modules/promzard/example/npm-init/init.js create mode 100644 deps/npm/node_modules/promzard/example/npm-init/package.json create mode 100644 deps/npm/node_modules/promzard/example/substack-input.js create 
mode 100644 deps/npm/node_modules/promzard/promzard.js create mode 100644 deps/npm/node_modules/promzard/test/basic.js create mode 100644 deps/npm/node_modules/promzard/test/buffer.js create mode 100644 deps/npm/node_modules/promzard/test/exports.input create mode 100644 deps/npm/node_modules/promzard/test/exports.js create mode 100644 deps/npm/node_modules/promzard/test/fn.input create mode 100644 deps/npm/node_modules/promzard/test/fn.js create mode 100644 deps/npm/node_modules/promzard/test/simple.input create mode 100644 deps/npm/node_modules/promzard/test/simple.js create mode 100644 deps/npm/node_modules/promzard/test/validate.input create mode 100644 deps/npm/node_modules/promzard/test/validate.js create mode 100644 deps/npm/node_modules/readable-stream/CONTRIBUTING.md create mode 100644 deps/npm/node_modules/readable-stream/GOVERNANCE.md create mode 100644 deps/npm/node_modules/readable-stream/errors-browser.js create mode 100644 deps/npm/node_modules/readable-stream/errors.js create mode 100644 deps/npm/node_modules/readable-stream/experimentalWarning.js create mode 100644 deps/npm/node_modules/readable-stream/lib/internal/streams/async_iterator.js create mode 100644 deps/npm/node_modules/readable-stream/lib/internal/streams/from-browser.js create mode 100644 deps/npm/node_modules/readable-stream/lib/internal/streams/stream-browser.js create mode 100644 deps/npm/node_modules/readable-stream/lib/internal/streams/stream.js create mode 100644 deps/npm/node_modules/readable-stream/readable-browser.js create mode 100644 deps/npm/node_modules/readable-stream/readable.js create mode 100644 deps/v8/.vpython create mode 100644 deps/v8/bazel/BUILD.zlib create mode 100644 deps/v8/gni/v8.cmx create mode 100644 deps/v8/src/base/atomicops_internals_atomicword_compat.h create mode 100644 deps/v8/src/base/functional.cc create mode 100644 deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h create mode 100644 deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h create mode 100644 deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h create mode 100644 deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h create mode 100644 deps/v8/src/builtins/builtins-shadow-realms.cc create mode 100644 deps/v8/src/builtins/builtins-shadowrealm-gen.cc create mode 100644 deps/v8/src/builtins/mips/builtins-mips.cc create mode 100644 deps/v8/src/builtins/riscv64/builtins-riscv64.cc create mode 100644 deps/v8/src/codegen/mips/assembler-mips-inl.h create mode 100644 deps/v8/src/codegen/mips/assembler-mips.cc create mode 100644 deps/v8/src/codegen/mips/assembler-mips.h create mode 100644 deps/v8/src/codegen/mips/constants-mips.cc create mode 100644 deps/v8/src/codegen/mips/constants-mips.h create mode 100644 deps/v8/src/codegen/mips/cpu-mips.cc create mode 100644 deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h create mode 100644 deps/v8/src/codegen/mips/macro-assembler-mips.cc create mode 100644 deps/v8/src/codegen/mips/macro-assembler-mips.h create mode 100644 deps/v8/src/codegen/mips/register-mips.h create mode 100644 deps/v8/src/codegen/mips/reglist-mips.h create mode 100644 deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h create mode 100644 deps/v8/src/codegen/riscv64/assembler-riscv64.cc create mode 100644 deps/v8/src/codegen/riscv64/assembler-riscv64.h create mode 100644 deps/v8/src/codegen/riscv64/constants-riscv64.cc create mode 100644 deps/v8/src/codegen/riscv64/constants-riscv64.h create mode 100644 deps/v8/src/codegen/riscv64/cpu-riscv64.cc create mode 100644 
deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h create mode 100644 deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc create mode 100644 deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h create mode 100644 deps/v8/src/codegen/riscv64/register-riscv64.h create mode 100644 deps/v8/src/codegen/riscv64/reglist-riscv64.h create mode 100644 deps/v8/src/common/allow-deprecated.h create mode 100644 deps/v8/src/compiler/backend/mips/code-generator-mips.cc create mode 100644 deps/v8/src/compiler/backend/mips/instruction-codes-mips.h create mode 100644 deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc create mode 100644 deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc create mode 100644 deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc create mode 100644 deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h create mode 100644 deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc create mode 100644 deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc create mode 100644 deps/v8/src/debug/debug-type-profile.cc create mode 100644 deps/v8/src/debug/debug-type-profile.h create mode 100644 deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc create mode 100644 deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc create mode 100644 deps/v8/src/diagnostics/mips/disasm-mips.cc create mode 100644 deps/v8/src/diagnostics/mips/unwinder-mips.cc create mode 100644 deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc create mode 100644 deps/v8/src/diagnostics/riscv64/unwinder-riscv64.cc create mode 100644 deps/v8/src/diagnostics/system-jit-metadata-win.h create mode 100644 deps/v8/src/diagnostics/system-jit-win.cc create mode 100644 deps/v8/src/diagnostics/system-jit-win.h create mode 100644 deps/v8/src/execution/mips/frame-constants-mips.cc create mode 100644 deps/v8/src/execution/mips/frame-constants-mips.h create mode 100644 deps/v8/src/execution/mips/simulator-mips.cc create mode 100644 deps/v8/src/execution/mips/simulator-mips.h create mode 100644 deps/v8/src/execution/riscv64/frame-constants-riscv64.cc create mode 100644 deps/v8/src/execution/riscv64/frame-constants-riscv64.h create mode 100644 deps/v8/src/execution/riscv64/simulator-riscv64.cc create mode 100644 deps/v8/src/execution/riscv64/simulator-riscv64.h create mode 100644 deps/v8/src/heap/base/asm/ia32/push_registers_masm.S create mode 100644 deps/v8/src/heap/base/asm/mips/push_registers_asm.cc create mode 100644 deps/v8/src/heap/base/asm/riscv64/push_registers_asm.cc create mode 100644 deps/v8/src/heap/base/asm/x64/push_registers_masm.S create mode 100644 deps/v8/src/logging/log-utils.cc create mode 100644 deps/v8/src/logging/log-utils.h create mode 100644 deps/v8/src/objects/js-shadow-realms-inl.h create mode 100644 deps/v8/src/objects/js-shadow-realms.h create mode 100644 deps/v8/src/objects/js-shadow-realms.tq create mode 100644 deps/v8/src/objects/osr-optimized-code-cache-inl.h create mode 100644 deps/v8/src/objects/osr-optimized-code-cache.cc create mode 100644 deps/v8/src/objects/osr-optimized-code-cache.h create mode 100644 deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc create mode 100644 deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h create mode 100644 deps/v8/src/regexp/property-sequences.cc create mode 100644 deps/v8/src/regexp/property-sequences.h create mode 100644 deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc create mode 100644 deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h create mode 100644 
deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h create mode 100644 deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h create mode 100644 deps/v8/src/wasm/init-expr-interface.cc create mode 100644 deps/v8/src/wasm/init-expr-interface.h create mode 100644 deps/v8/src/wasm/memory-protection-key.cc create mode 100644 deps/v8/src/wasm/memory-protection-key.h create mode 100644 deps/v8/src/wasm/signature-map.cc create mode 100644 deps/v8/src/wasm/signature-map.h create mode 100644 deps/v8/test/cctest/compiler/c-signature.h create mode 100644 deps/v8/test/cctest/compiler/call-tester.h create mode 100644 deps/v8/test/cctest/compiler/code-assembler-tester.h create mode 100644 deps/v8/test/cctest/compiler/node-observer-tester.h create mode 100644 deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc create mode 100644 deps/v8/test/cctest/compiler/test-run-deopt.cc create mode 100644 deps/v8/test/cctest/compiler/test-run-jsbranches.cc create mode 100644 deps/v8/test/cctest/compiler/test-run-jscalls.cc create mode 100644 deps/v8/test/cctest/compiler/test-run-jsexceptions.cc create mode 100644 deps/v8/test/cctest/compiler/test-run-jsobjects.cc create mode 100644 deps/v8/test/cctest/compiler/test-run-jsops.cc create mode 100644 deps/v8/test/cctest/compiler/test-run-tail-calls.cc create mode 100644 deps/v8/test/cctest/compiler/test-sloppy-equality.cc create mode 100644 deps/v8/test/cctest/compiler/value-helper.cc create mode 100644 deps/v8/test/cctest/compiler/value-helper.h create mode 100644 deps/v8/test/cctest/disasm-regex-helper.cc create mode 100644 deps/v8/test/cctest/disasm-regex-helper.h create mode 100644 deps/v8/test/cctest/gay-fixed.cc diff --git a/CHANGELOG.md b/CHANGELOG.md index 910fcda0a60927..07561fa6ca06a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,7 +35,8 @@ release. -19.6.1
+19.7.0
+19.6.1
19.6.0
19.5.0
19.4.0
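
Of the notable changes above, the new `aborted()` utility added to `lib` (https://github.com/nodejs/node/pull/46494) is exposed via `node:util`. A minimal sketch of how it can be used, assuming the `util.aborted(signal, resource)` signature and Node.js 19.7.0 or later; the `resource` object and the timeout below are illustrative only, not part of this patch:

  'use strict';
  const { aborted } = require('node:util');

  // Stand-in for whatever owns the cancelable work.
  const controller = new AbortController();
  // Illustrative resource object; any non-null object tied to the
  // abortable operation can be passed here.
  const resource = { name: 'example-task' };

  // aborted(signal, resource) returns a promise that fulfills once the
  // signal aborts, so cleanup can be written as ordinary async code.
  aborted(controller.signal, resource).then(() => {
    console.log(`${resource.name}: signal aborted, cleaning up`);
  });

  setTimeout(() => controller.abort(), 100);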
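
The test runner changes above (initial code coverage support and the flag rename in https://github.com/nodejs/node/pull/46017) can be exercised from the command line. A sketch under the assumption that the renamed flag is `--experimental-test-coverage` and that the file name below is hypothetical:

  // example.test.js
  'use strict';
  const test = require('node:test');
  const assert = require('node:assert');

  test('adds two numbers', () => {
    assert.strictEqual(1 + 1, 2);
  });

Run it with coverage reporting enabled:

  node --test --experimental-test-coverage example.test.js
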
diff --git a/deps/cares/src/lib/ares_library_init.h b/deps/cares/src/lib/ares_library_init.h new file mode 100644 index 00000000000000..b3896d9f7bbed0 --- /dev/null +++ b/deps/cares/src/lib/ares_library_init.h @@ -0,0 +1,43 @@ +#ifndef HEADER_CARES_LIBRARY_INIT_H +#define HEADER_CARES_LIBRARY_INIT_H + + +/* Copyright 1998 by the Massachusetts Institute of Technology. + * Copyright (C) 2004-2011 by Daniel Stenberg + * + * Permission to use, copy, modify, and distribute this + * software and its documentation for any purpose and without + * fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting + * documentation, and that the name of M.I.T. not be used in + * advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. + * M.I.T. makes no representations about the suitability of + * this software for any purpose. It is provided "as is" + * without express or implied warranty. + */ + +#include "ares_setup.h" + +#ifdef USE_WINSOCK + +#include +#include "ares_iphlpapi.h" + +typedef DWORD (WINAPI *fpGetNetworkParams_t) (FIXED_INFO*, DWORD*); +typedef BOOLEAN (APIENTRY *fpSystemFunction036_t) (void*, ULONG); +typedef ULONG (WINAPI *fpGetAdaptersAddresses_t) ( ULONG, ULONG, void*, IP_ADAPTER_ADDRESSES*, ULONG* ); +typedef NETIO_STATUS (WINAPI *fpGetBestRoute2_t) ( NET_LUID *, NET_IFINDEX, const SOCKADDR_INET *, const SOCKADDR_INET *, ULONG, PMIB_IPFORWARD_ROW2, SOCKADDR_INET * ); +/* Forward-declaration of variables defined in ares_library_init.c */ +/* that are global and unique instances for whole c-ares library. */ + +extern fpGetNetworkParams_t ares_fpGetNetworkParams; +extern fpSystemFunction036_t ares_fpSystemFunction036; +extern fpGetAdaptersAddresses_t ares_fpGetAdaptersAddresses; +extern fpGetBestRoute2_t ares_fpGetBestRoute2; + +#endif /* USE_WINSOCK */ + +#endif /* HEADER_CARES_LIBRARY_INIT_H */ + diff --git a/deps/corepack/dist/vcc.js b/deps/corepack/dist/vcc.js new file mode 100644 index 00000000000000..2a8c9014b546d1 --- /dev/null +++ b/deps/corepack/dist/vcc.js @@ -0,0 +1,424 @@ +#!/usr/bin/env node +/* eslint-disable */ +/******/ (() => { // webpackBootstrap +/******/ var __webpack_modules__ = ({ + +/***/ "../../../.yarn/berry/cache/v8-compile-cache-npm-2.3.0-961375f150-9.zip/node_modules/v8-compile-cache/v8-compile-cache.js": +/*!********************************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/v8-compile-cache-npm-2.3.0-961375f150-9.zip/node_modules/v8-compile-cache/v8-compile-cache.js ***! + \********************************************************************************************************************************/ +/***/ (function(module, exports) { + +'use strict'; + +const Module = require('module'); +const crypto = require('crypto'); +const fs = require('fs'); +const path = require('path'); +const vm = require('vm'); +const os = require('os'); + +const hasOwnProperty = Object.prototype.hasOwnProperty; + +//------------------------------------------------------------------------------ +// FileSystemBlobStore +//------------------------------------------------------------------------------ + +class FileSystemBlobStore { + constructor(directory, prefix) { + const name = prefix ? 
slashEscape(prefix + '.') : ''; + this._blobFilename = path.join(directory, name + 'BLOB'); + this._mapFilename = path.join(directory, name + 'MAP'); + this._lockFilename = path.join(directory, name + 'LOCK'); + this._directory = directory; + this._load(); + } + + has(key, invalidationKey) { + if (hasOwnProperty.call(this._memoryBlobs, key)) { + return this._invalidationKeys[key] === invalidationKey; + } else if (hasOwnProperty.call(this._storedMap, key)) { + return this._storedMap[key][0] === invalidationKey; + } + return false; + } + + get(key, invalidationKey) { + if (hasOwnProperty.call(this._memoryBlobs, key)) { + if (this._invalidationKeys[key] === invalidationKey) { + return this._memoryBlobs[key]; + } + } else if (hasOwnProperty.call(this._storedMap, key)) { + const mapping = this._storedMap[key]; + if (mapping[0] === invalidationKey) { + return this._storedBlob.slice(mapping[1], mapping[2]); + } + } + } + + set(key, invalidationKey, buffer) { + this._invalidationKeys[key] = invalidationKey; + this._memoryBlobs[key] = buffer; + this._dirty = true; + } + + delete(key) { + if (hasOwnProperty.call(this._memoryBlobs, key)) { + this._dirty = true; + delete this._memoryBlobs[key]; + } + if (hasOwnProperty.call(this._invalidationKeys, key)) { + this._dirty = true; + delete this._invalidationKeys[key]; + } + if (hasOwnProperty.call(this._storedMap, key)) { + this._dirty = true; + delete this._storedMap[key]; + } + } + + isDirty() { + return this._dirty; + } + + save() { + const dump = this._getDump(); + const blobToStore = Buffer.concat(dump[0]); + const mapToStore = JSON.stringify(dump[1]); + + try { + mkdirpSync(this._directory); + fs.writeFileSync(this._lockFilename, 'LOCK', {flag: 'wx'}); + } catch (error) { + // Swallow the exception if we fail to acquire the lock. 
+ return false; + } + + try { + fs.writeFileSync(this._blobFilename, blobToStore); + fs.writeFileSync(this._mapFilename, mapToStore); + } finally { + fs.unlinkSync(this._lockFilename); + } + + return true; + } + + _load() { + try { + this._storedBlob = fs.readFileSync(this._blobFilename); + this._storedMap = JSON.parse(fs.readFileSync(this._mapFilename)); + } catch (e) { + this._storedBlob = Buffer.alloc(0); + this._storedMap = {}; + } + this._dirty = false; + this._memoryBlobs = {}; + this._invalidationKeys = {}; + } + + _getDump() { + const buffers = []; + const newMap = {}; + let offset = 0; + + function push(key, invalidationKey, buffer) { + buffers.push(buffer); + newMap[key] = [invalidationKey, offset, offset + buffer.length]; + offset += buffer.length; + } + + for (const key of Object.keys(this._memoryBlobs)) { + const buffer = this._memoryBlobs[key]; + const invalidationKey = this._invalidationKeys[key]; + push(key, invalidationKey, buffer); + } + + for (const key of Object.keys(this._storedMap)) { + if (hasOwnProperty.call(newMap, key)) continue; + const mapping = this._storedMap[key]; + const buffer = this._storedBlob.slice(mapping[1], mapping[2]); + push(key, mapping[0], buffer); + } + + return [buffers, newMap]; + } +} + +//------------------------------------------------------------------------------ +// NativeCompileCache +//------------------------------------------------------------------------------ + +class NativeCompileCache { + constructor() { + this._cacheStore = null; + this._previousModuleCompile = null; + } + + setCacheStore(cacheStore) { + this._cacheStore = cacheStore; + } + + install() { + const self = this; + const hasRequireResolvePaths = typeof require.resolve.paths === 'function'; + this._previousModuleCompile = Module.prototype._compile; + Module.prototype._compile = function(content, filename) { + const mod = this; + + function require(id) { + return mod.require(id); + } + + // https://github.com/nodejs/node/blob/v10.15.3/lib/internal/modules/cjs/helpers.js#L28 + function resolve(request, options) { + return Module._resolveFilename(request, mod, false, options); + } + require.resolve = resolve; + + // https://github.com/nodejs/node/blob/v10.15.3/lib/internal/modules/cjs/helpers.js#L37 + // resolve.resolve.paths was added in v8.9.0 + if (hasRequireResolvePaths) { + resolve.paths = function paths(request) { + return Module._resolveLookupPaths(request, mod, true); + }; + } + + require.main = process.mainModule; + + // Enable support to add extra extension types + require.extensions = Module._extensions; + require.cache = Module._cache; + + const dirname = path.dirname(filename); + + const compiledWrapper = self._moduleCompile(filename, content); + + // We skip the debugger setup because by the time we run, node has already + // done that itself. + + // `Buffer` is included for Electron. 
+ // See https://github.com/zertosh/v8-compile-cache/pull/10#issuecomment-518042543 + const args = [mod.exports, require, mod, filename, dirname, process, global, Buffer]; + return compiledWrapper.apply(mod.exports, args); + }; + } + + uninstall() { + Module.prototype._compile = this._previousModuleCompile; + } + + _moduleCompile(filename, content) { + // https://github.com/nodejs/node/blob/v7.5.0/lib/module.js#L511 + + // Remove shebang + var contLen = content.length; + if (contLen >= 2) { + if (content.charCodeAt(0) === 35/*#*/ && + content.charCodeAt(1) === 33/*!*/) { + if (contLen === 2) { + // Exact match + content = ''; + } else { + // Find end of shebang line and slice it off + var i = 2; + for (; i < contLen; ++i) { + var code = content.charCodeAt(i); + if (code === 10/*\n*/ || code === 13/*\r*/) break; + } + if (i === contLen) { + content = ''; + } else { + // Note that this actually includes the newline character(s) in the + // new output. This duplicates the behavior of the regular + // expression that was previously used to replace the shebang line + content = content.slice(i); + } + } + } + } + + // create wrapper function + var wrapper = Module.wrap(content); + + var invalidationKey = crypto + .createHash('sha1') + .update(content, 'utf8') + .digest('hex'); + + var buffer = this._cacheStore.get(filename, invalidationKey); + + var script = new vm.Script(wrapper, { + filename: filename, + lineOffset: 0, + displayErrors: true, + cachedData: buffer, + produceCachedData: true, + }); + + if (script.cachedDataProduced) { + this._cacheStore.set(filename, invalidationKey, script.cachedData); + } else if (script.cachedDataRejected) { + this._cacheStore.delete(filename); + } + + var compiledWrapper = script.runInThisContext({ + filename: filename, + lineOffset: 0, + columnOffset: 0, + displayErrors: true, + }); + + return compiledWrapper; + } +} + +//------------------------------------------------------------------------------ +// utilities +// +// https://github.com/substack/node-mkdirp/blob/f2003bb/index.js#L55-L98 +// https://github.com/zertosh/slash-escape/blob/e7ebb99/slash-escape.js +//------------------------------------------------------------------------------ + +function mkdirpSync(p_) { + _mkdirpSync(path.resolve(p_), 0o777); +} + +function _mkdirpSync(p, mode) { + try { + fs.mkdirSync(p, mode); + } catch (err0) { + if (err0.code === 'ENOENT') { + _mkdirpSync(path.dirname(p)); + _mkdirpSync(p); + } else { + try { + const stat = fs.statSync(p); + if (!stat.isDirectory()) { throw err0; } + } catch (err1) { + throw err0; + } + } + } +} + +function slashEscape(str) { + const ESCAPE_LOOKUP = { + '\\': 'zB', + ':': 'zC', + '/': 'zS', + '\x00': 'z0', + 'z': 'zZ', + }; + const ESCAPE_REGEX = /[\\:/\x00z]/g; // eslint-disable-line no-control-regex + return str.replace(ESCAPE_REGEX, match => ESCAPE_LOOKUP[match]); +} + +function supportsCachedData() { + const script = new vm.Script('""', {produceCachedData: true}); + // chakracore, as of v1.7.1.0, returns `false`. + return script.cachedDataProduced === true; +} + +function getCacheDir() { + const v8_compile_cache_cache_dir = process.env.V8_COMPILE_CACHE_CACHE_DIR; + if (v8_compile_cache_cache_dir) { + return v8_compile_cache_cache_dir; + } + + // Avoid cache ownership issues on POSIX systems. + const dirname = typeof process.getuid === 'function' + ? 'v8-compile-cache-' + process.getuid() + : 'v8-compile-cache'; + const version = typeof process.versions.v8 === 'string' + ? 
process.versions.v8 + : typeof process.versions.chakracore === 'string' + ? 'chakracore-' + process.versions.chakracore + : 'node-' + process.version; + const cacheDir = path.join(os.tmpdir(), dirname, version); + return cacheDir; +} + +function getMainName() { + // `require.main.filename` is undefined or null when: + // * node -e 'require("v8-compile-cache")' + // * node -r 'v8-compile-cache' + // * Or, requiring from the REPL. + const mainName = require.main && typeof require.main.filename === 'string' + ? require.main.filename + : process.cwd(); + return mainName; +} + +//------------------------------------------------------------------------------ +// main +//------------------------------------------------------------------------------ + +if (!process.env.DISABLE_V8_COMPILE_CACHE && supportsCachedData()) { + const cacheDir = getCacheDir(); + const prefix = getMainName(); + const blobStore = new FileSystemBlobStore(cacheDir, prefix); + + const nativeCompileCache = new NativeCompileCache(); + nativeCompileCache.setCacheStore(blobStore); + nativeCompileCache.install(); + + process.once('exit', () => { + if (blobStore.isDirty()) { + blobStore.save(); + } + nativeCompileCache.uninstall(); + }); +} + +module.exports.__TEST__ = { + FileSystemBlobStore, + NativeCompileCache, + mkdirpSync, + slashEscape, + supportsCachedData, + getCacheDir, + getMainName, +}; + + +/***/ }) + +/******/ }); +/************************************************************************/ +/******/ // The module cache +/******/ var __webpack_module_cache__ = {}; +/******/ +/******/ // The require function +/******/ function __webpack_require__(moduleId) { +/******/ // Check if module is in cache +/******/ var cachedModule = __webpack_module_cache__[moduleId]; +/******/ if (cachedModule !== undefined) { +/******/ return cachedModule.exports; +/******/ } +/******/ // Create a new module (and put it into the cache) +/******/ var module = __webpack_module_cache__[moduleId] = { +/******/ // no module.id needed +/******/ // no module.loaded needed +/******/ exports: {} +/******/ }; +/******/ +/******/ // Execute the module function +/******/ __webpack_modules__[moduleId].call(module.exports, module, module.exports, __webpack_require__); +/******/ +/******/ // Return the exports of the module +/******/ return module.exports; +/******/ } +/******/ +/************************************************************************/ +/******/ +/******/ // startup +/******/ // Load entry module and return exports +/******/ // This entry module doesn't tell about it's top-level declarations so it can't be inlined +/******/ var __webpack_exports__ = __webpack_require__("../../../.yarn/berry/cache/v8-compile-cache-npm-2.3.0-961375f150-9.zip/node_modules/v8-compile-cache/v8-compile-cache.js"); +/******/ var __webpack_export_target__ = exports; +/******/ for(var i in __webpack_exports__) __webpack_export_target__[i] = __webpack_exports__[i]; +/******/ if(__webpack_exports__.__esModule) Object.defineProperty(__webpack_export_target__, "__esModule", { value: true }); +/******/ +/******/ })() +; \ No newline at end of file diff --git a/deps/corepack/dist/vendors-_yarn_berry_cache_proxy-agent-npm-5_0_0-41772f4b01-9_zip_node_modules_proxy-agent_index_js.js b/deps/corepack/dist/vendors-_yarn_berry_cache_proxy-agent-npm-5_0_0-41772f4b01-9_zip_node_modules_proxy-agent_index_js.js new file mode 100644 index 00000000000000..b2c2dd247a4e8c --- /dev/null +++ 
b/deps/corepack/dist/vendors-_yarn_berry_cache_proxy-agent-npm-5_0_0-41772f4b01-9_zip_node_modules_proxy-agent_index_js.js @@ -0,0 +1,41282 @@ +exports.id = "vendors-_yarn_berry_cache_proxy-agent-npm-5_0_0-41772f4b01-9_zip_node_modules_proxy-agent_index_js"; +exports.ids = ["vendors-_yarn_berry_cache_proxy-agent-npm-5_0_0-41772f4b01-9_zip_node_modules_proxy-agent_index_js"]; +exports.modules = { + +/***/ "../../../.yarn/berry/cache/@tootallnate-once-npm-1.1.2-0517220057-9.zip/node_modules/@tootallnate/once/dist/index.js": +/*!****************************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/@tootallnate-once-npm-1.1.2-0517220057-9.zip/node_modules/@tootallnate/once/dist/index.js ***! + \****************************************************************************************************************************/ +/***/ ((module) => { + +"use strict"; + +function noop() { } +function once(emitter, name) { + const o = once.spread(emitter, name); + const r = o.then((args) => args[0]); + r.cancel = o.cancel; + return r; +} +(function (once) { + function spread(emitter, name) { + let c = null; + const p = new Promise((resolve, reject) => { + function cancel() { + emitter.removeListener(name, onEvent); + emitter.removeListener('error', onError); + p.cancel = noop; + } + function onEvent(...args) { + cancel(); + resolve(args); + } + function onError(err) { + cancel(); + reject(err); + } + c = cancel; + emitter.on(name, onEvent); + emitter.on('error', onError); + }); + if (!c) { + throw new TypeError('Could not get `cancel()` function'); + } + p.cancel = c; + return p; + } + once.spread = spread; +})(once || (once = {})); +module.exports = once; +//# sourceMappingURL=index.js.map + +/***/ }), + +/***/ "../../../.yarn/berry/cache/agent-base-npm-6.0.2-428f325a93-9.zip/node_modules/agent-base/dist/src/index.js": +/*!******************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/agent-base-npm-6.0.2-428f325a93-9.zip/node_modules/agent-base/dist/src/index.js ***! + \******************************************************************************************************************/ +/***/ (function(module, __unused_webpack_exports, __webpack_require__) { + +"use strict"; + +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? mod : { "default": mod }; +}; +const events_1 = __webpack_require__(/*! events */ "events"); +const debug_1 = __importDefault(__webpack_require__(/*! debug */ "./.yarn/__virtual__/debug-virtual-450dae1bfe/4/.yarn/berry/cache/debug-npm-4.3.4-4513954577-9.zip/node_modules/debug/src/index.js")); +const promisify_1 = __importDefault(__webpack_require__(/*! ./promisify */ "../../../.yarn/berry/cache/agent-base-npm-6.0.2-428f325a93-9.zip/node_modules/agent-base/dist/src/promisify.js")); +const debug = debug_1.default('agent-base'); +function isAgent(v) { + return Boolean(v) && typeof v.addRequest === 'function'; +} +function isSecureEndpoint() { + const { stack } = new Error(); + if (typeof stack !== 'string') + return false; + return stack.split('\n').some(l => l.indexOf('(https.js:') !== -1 || l.indexOf('node:https:') !== -1); +} +function createAgent(callback, opts) { + return new createAgent.Agent(callback, opts); +} +(function (createAgent) { + /** + * Base `http.Agent` implementation. + * No pooling/keep-alive is implemented by default. 
+ * + * @param {Function} callback + * @api public + */ + class Agent extends events_1.EventEmitter { + constructor(callback, _opts) { + super(); + let opts = _opts; + if (typeof callback === 'function') { + this.callback = callback; + } + else if (callback) { + opts = callback; + } + // Timeout for the socket to be returned from the callback + this.timeout = null; + if (opts && typeof opts.timeout === 'number') { + this.timeout = opts.timeout; + } + // These aren't actually used by `agent-base`, but are required + // for the TypeScript definition files in `@types/node` :/ + this.maxFreeSockets = 1; + this.maxSockets = 1; + this.maxTotalSockets = Infinity; + this.sockets = {}; + this.freeSockets = {}; + this.requests = {}; + this.options = {}; + } + get defaultPort() { + if (typeof this.explicitDefaultPort === 'number') { + return this.explicitDefaultPort; + } + return isSecureEndpoint() ? 443 : 80; + } + set defaultPort(v) { + this.explicitDefaultPort = v; + } + get protocol() { + if (typeof this.explicitProtocol === 'string') { + return this.explicitProtocol; + } + return isSecureEndpoint() ? 'https:' : 'http:'; + } + set protocol(v) { + this.explicitProtocol = v; + } + callback(req, opts, fn) { + throw new Error('"agent-base" has no default implementation, you must subclass and override `callback()`'); + } + /** + * Called by node-core's "_http_client.js" module when creating + * a new HTTP request with this Agent instance. + * + * @api public + */ + addRequest(req, _opts) { + const opts = Object.assign({}, _opts); + if (typeof opts.secureEndpoint !== 'boolean') { + opts.secureEndpoint = isSecureEndpoint(); + } + if (opts.host == null) { + opts.host = 'localhost'; + } + if (opts.port == null) { + opts.port = opts.secureEndpoint ? 443 : 80; + } + if (opts.protocol == null) { + opts.protocol = opts.secureEndpoint ? 'https:' : 'http:'; + } + if (opts.host && opts.path) { + // If both a `host` and `path` are specified then it's most + // likely the result of a `url.parse()` call... we need to + // remove the `path` portion so that `net.connect()` doesn't + // attempt to open that as a unix socket file. + delete opts.path; + } + delete opts.agent; + delete opts.hostname; + delete opts._defaultAgent; + delete opts.defaultPort; + delete opts.createConnection; + // Hint to use "Connection: close" + // XXX: non-documented `http` module API :( + req._last = true; + req.shouldKeepAlive = false; + let timedOut = false; + let timeoutId = null; + const timeoutMs = opts.timeout || this.timeout; + const onerror = (err) => { + if (req._hadError) + return; + req.emit('error', err); + // For Safety. Some additional errors might fire later on + // and we need to make sure we don't double-fire the error event. 
+ req._hadError = true; + }; + const ontimeout = () => { + timeoutId = null; + timedOut = true; + const err = new Error(`A "socket" was not created for HTTP request before ${timeoutMs}ms`); + err.code = 'ETIMEOUT'; + onerror(err); + }; + const callbackError = (err) => { + if (timedOut) + return; + if (timeoutId !== null) { + clearTimeout(timeoutId); + timeoutId = null; + } + onerror(err); + }; + const onsocket = (socket) => { + if (timedOut) + return; + if (timeoutId != null) { + clearTimeout(timeoutId); + timeoutId = null; + } + if (isAgent(socket)) { + // `socket` is actually an `http.Agent` instance, so + // relinquish responsibility for this `req` to the Agent + // from here on + debug('Callback returned another Agent instance %o', socket.constructor.name); + socket.addRequest(req, opts); + return; + } + if (socket) { + socket.once('free', () => { + this.freeSocket(socket, opts); + }); + req.onSocket(socket); + return; + } + const err = new Error(`no Duplex stream was returned to agent-base for \`${req.method} ${req.path}\``); + onerror(err); + }; + if (typeof this.callback !== 'function') { + onerror(new Error('`callback` is not defined')); + return; + } + if (!this.promisifiedCallback) { + if (this.callback.length >= 3) { + debug('Converting legacy callback function to promise'); + this.promisifiedCallback = promisify_1.default(this.callback); + } + else { + this.promisifiedCallback = this.callback; + } + } + if (typeof timeoutMs === 'number' && timeoutMs > 0) { + timeoutId = setTimeout(ontimeout, timeoutMs); + } + if ('port' in opts && typeof opts.port !== 'number') { + opts.port = Number(opts.port); + } + try { + debug('Resolving socket for %o request: %o', opts.protocol, `${req.method} ${req.path}`); + Promise.resolve(this.promisifiedCallback(req, opts)).then(onsocket, callbackError); + } + catch (err) { + Promise.reject(err).catch(callbackError); + } + } + freeSocket(socket, opts) { + debug('Freeing socket %o %o', socket.constructor.name, opts); + socket.destroy(); + } + destroy() { + debug('Destroying agent %o', this.constructor.name); + } + } + createAgent.Agent = Agent; + // So that `instanceof` works correctly + createAgent.prototype = createAgent.Agent.prototype; +})(createAgent || (createAgent = {})); +module.exports = createAgent; +//# sourceMappingURL=index.js.map + +/***/ }), + +/***/ "../../../.yarn/berry/cache/agent-base-npm-6.0.2-428f325a93-9.zip/node_modules/agent-base/dist/src/promisify.js": +/*!**********************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/agent-base-npm-6.0.2-428f325a93-9.zip/node_modules/agent-base/dist/src/promisify.js ***! 
+ \**********************************************************************************************************************/ +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +function promisify(fn) { + return function (req, opts) { + return new Promise((resolve, reject) => { + fn.call(this, req, opts, (err, rtn) => { + if (err) { + reject(err); + } + else { + resolve(rtn); + } + }); + }); + }; +} +exports["default"] = promisify; +//# sourceMappingURL=promisify.js.map + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/babel-core.js": +/*!*****************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/babel-core.js ***! + \*****************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var shared_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/shared */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js")); +var es7_1 = tslib_1.__importDefault(__webpack_require__(/*! ./es7 */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es7.js")); +function default_1(fork) { + fork.use(es7_1.default); + var types = fork.use(types_1.default); + var defaults = fork.use(shared_1.default).defaults; + var def = types.Type.def; + var or = types.Type.or; + def("Noop") + .bases("Statement") + .build(); + def("DoExpression") + .bases("Expression") + .build("body") + .field("body", [def("Statement")]); + def("Super") + .bases("Expression") + .build(); + def("BindExpression") + .bases("Expression") + .build("object", "callee") + .field("object", or(def("Expression"), null)) + .field("callee", def("Expression")); + def("Decorator") + .bases("Node") + .build("expression") + .field("expression", def("Expression")); + def("Property") + .field("decorators", or([def("Decorator")], null), defaults["null"]); + def("MethodDefinition") + .field("decorators", or([def("Decorator")], null), defaults["null"]); + def("MetaProperty") + .bases("Expression") + .build("meta", "property") + .field("meta", def("Identifier")) + .field("property", def("Identifier")); + def("ParenthesizedExpression") + .bases("Expression") + .build("expression") + .field("expression", def("Expression")); + def("ImportSpecifier") + .bases("ModuleSpecifier") + .build("imported", "local") + .field("imported", def("Identifier")); + def("ImportDefaultSpecifier") + .bases("ModuleSpecifier") + .build("local"); + def("ImportNamespaceSpecifier") + .bases("ModuleSpecifier") + .build("local"); + def("ExportDefaultDeclaration") + .bases("Declaration") + .build("declaration") + .field("declaration", or(def("Declaration"), def("Expression"))); + def("ExportNamedDeclaration") + .bases("Declaration") + .build("declaration", "specifiers", "source") + 
.field("declaration", or(def("Declaration"), null)) + .field("specifiers", [def("ExportSpecifier")], defaults.emptyArray) + .field("source", or(def("Literal"), null), defaults["null"]); + def("ExportSpecifier") + .bases("ModuleSpecifier") + .build("local", "exported") + .field("exported", def("Identifier")); + def("ExportNamespaceSpecifier") + .bases("Specifier") + .build("exported") + .field("exported", def("Identifier")); + def("ExportDefaultSpecifier") + .bases("Specifier") + .build("exported") + .field("exported", def("Identifier")); + def("ExportAllDeclaration") + .bases("Declaration") + .build("exported", "source") + .field("exported", or(def("Identifier"), null)) + .field("source", def("Literal")); + def("CommentBlock") + .bases("Comment") + .build("value", /*optional:*/ "leading", "trailing"); + def("CommentLine") + .bases("Comment") + .build("value", /*optional:*/ "leading", "trailing"); + def("Directive") + .bases("Node") + .build("value") + .field("value", def("DirectiveLiteral")); + def("DirectiveLiteral") + .bases("Node", "Expression") + .build("value") + .field("value", String, defaults["use strict"]); + def("InterpreterDirective") + .bases("Node") + .build("value") + .field("value", String); + def("BlockStatement") + .bases("Statement") + .build("body") + .field("body", [def("Statement")]) + .field("directives", [def("Directive")], defaults.emptyArray); + def("Program") + .bases("Node") + .build("body") + .field("body", [def("Statement")]) + .field("directives", [def("Directive")], defaults.emptyArray) + .field("interpreter", or(def("InterpreterDirective"), null), defaults["null"]); + // Split Literal + def("StringLiteral") + .bases("Literal") + .build("value") + .field("value", String); + def("NumericLiteral") + .bases("Literal") + .build("value") + .field("value", Number) + .field("raw", or(String, null), defaults["null"]) + .field("extra", { + rawValue: Number, + raw: String + }, function getDefault() { + return { + rawValue: this.value, + raw: this.value + "" + }; + }); + def("BigIntLiteral") + .bases("Literal") + .build("value") + // Only String really seems appropriate here, since BigInt values + // often exceed the limits of JS numbers. 
+ .field("value", or(String, Number)) + .field("extra", { + rawValue: String, + raw: String + }, function getDefault() { + return { + rawValue: String(this.value), + raw: this.value + "n" + }; + }); + def("NullLiteral") + .bases("Literal") + .build() + .field("value", null, defaults["null"]); + def("BooleanLiteral") + .bases("Literal") + .build("value") + .field("value", Boolean); + def("RegExpLiteral") + .bases("Literal") + .build("pattern", "flags") + .field("pattern", String) + .field("flags", String) + .field("value", RegExp, function () { + return new RegExp(this.pattern, this.flags); + }); + var ObjectExpressionProperty = or(def("Property"), def("ObjectMethod"), def("ObjectProperty"), def("SpreadProperty"), def("SpreadElement")); + // Split Property -> ObjectProperty and ObjectMethod + def("ObjectExpression") + .bases("Expression") + .build("properties") + .field("properties", [ObjectExpressionProperty]); + // ObjectMethod hoist .value properties to own properties + def("ObjectMethod") + .bases("Node", "Function") + .build("kind", "key", "params", "body", "computed") + .field("kind", or("method", "get", "set")) + .field("key", or(def("Literal"), def("Identifier"), def("Expression"))) + .field("params", [def("Pattern")]) + .field("body", def("BlockStatement")) + .field("computed", Boolean, defaults["false"]) + .field("generator", Boolean, defaults["false"]) + .field("async", Boolean, defaults["false"]) + .field("accessibility", // TypeScript + or(def("Literal"), null), defaults["null"]) + .field("decorators", or([def("Decorator")], null), defaults["null"]); + def("ObjectProperty") + .bases("Node") + .build("key", "value") + .field("key", or(def("Literal"), def("Identifier"), def("Expression"))) + .field("value", or(def("Expression"), def("Pattern"))) + .field("accessibility", // TypeScript + or(def("Literal"), null), defaults["null"]) + .field("computed", Boolean, defaults["false"]); + var ClassBodyElement = or(def("MethodDefinition"), def("VariableDeclarator"), def("ClassPropertyDefinition"), def("ClassProperty"), def("ClassPrivateProperty"), def("ClassMethod"), def("ClassPrivateMethod")); + // MethodDefinition -> ClassMethod + def("ClassBody") + .bases("Declaration") + .build("body") + .field("body", [ClassBodyElement]); + def("ClassMethod") + .bases("Declaration", "Function") + .build("kind", "key", "params", "body", "computed", "static") + .field("key", or(def("Literal"), def("Identifier"), def("Expression"))); + def("ClassPrivateMethod") + .bases("Declaration", "Function") + .build("key", "params", "body", "kind", "computed", "static") + .field("key", def("PrivateName")); + ["ClassMethod", + "ClassPrivateMethod", + ].forEach(function (typeName) { + def(typeName) + .field("kind", or("get", "set", "method", "constructor"), function () { return "method"; }) + .field("body", def("BlockStatement")) + .field("computed", Boolean, defaults["false"]) + .field("static", or(Boolean, null), defaults["null"]) + .field("abstract", or(Boolean, null), defaults["null"]) + .field("access", or("public", "private", "protected", null), defaults["null"]) + .field("accessibility", or("public", "private", "protected", null), defaults["null"]) + .field("decorators", or([def("Decorator")], null), defaults["null"]) + .field("optional", or(Boolean, null), defaults["null"]); + }); + def("ClassPrivateProperty") + .bases("ClassProperty") + .build("key", "value") + .field("key", def("PrivateName")) + .field("value", or(def("Expression"), null), defaults["null"]); + def("PrivateName") + .bases("Expression", 
"Pattern") + .build("id") + .field("id", def("Identifier")); + var ObjectPatternProperty = or(def("Property"), def("PropertyPattern"), def("SpreadPropertyPattern"), def("SpreadProperty"), // Used by Esprima + def("ObjectProperty"), // Babel 6 + def("RestProperty") // Babel 6 + ); + // Split into RestProperty and SpreadProperty + def("ObjectPattern") + .bases("Pattern") + .build("properties") + .field("properties", [ObjectPatternProperty]) + .field("decorators", or([def("Decorator")], null), defaults["null"]); + def("SpreadProperty") + .bases("Node") + .build("argument") + .field("argument", def("Expression")); + def("RestProperty") + .bases("Node") + .build("argument") + .field("argument", def("Expression")); + def("ForAwaitStatement") + .bases("Statement") + .build("left", "right", "body") + .field("left", or(def("VariableDeclaration"), def("Expression"))) + .field("right", def("Expression")) + .field("body", def("Statement")); + // The callee node of a dynamic import(...) expression. + def("Import") + .bases("Expression") + .build(); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/babel.js": +/*!************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/babel.js ***! + \************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var babel_core_1 = tslib_1.__importDefault(__webpack_require__(/*! ./babel-core */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/babel-core.js")); +var flow_1 = tslib_1.__importDefault(__webpack_require__(/*! ./flow */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/flow.js")); +function default_1(fork) { + fork.use(babel_core_1.default); + fork.use(flow_1.default); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/core.js": +/*!***********************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/core.js ***! + \***********************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var shared_1 = tslib_1.__importDefault(__webpack_require__(/*! 
../lib/shared */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js")); +function default_1(fork) { + var types = fork.use(types_1.default); + var Type = types.Type; + var def = Type.def; + var or = Type.or; + var shared = fork.use(shared_1.default); + var defaults = shared.defaults; + var geq = shared.geq; + // Abstract supertype of all syntactic entities that are allowed to have a + // .loc field. + def("Printable") + .field("loc", or(def("SourceLocation"), null), defaults["null"], true); + def("Node") + .bases("Printable") + .field("type", String) + .field("comments", or([def("Comment")], null), defaults["null"], true); + def("SourceLocation") + .field("start", def("Position")) + .field("end", def("Position")) + .field("source", or(String, null), defaults["null"]); + def("Position") + .field("line", geq(1)) + .field("column", geq(0)); + def("File") + .bases("Node") + .build("program", "name") + .field("program", def("Program")) + .field("name", or(String, null), defaults["null"]); + def("Program") + .bases("Node") + .build("body") + .field("body", [def("Statement")]); + def("Function") + .bases("Node") + .field("id", or(def("Identifier"), null), defaults["null"]) + .field("params", [def("Pattern")]) + .field("body", def("BlockStatement")) + .field("generator", Boolean, defaults["false"]) + .field("async", Boolean, defaults["false"]); + def("Statement").bases("Node"); + // The empty .build() here means that an EmptyStatement can be constructed + // (i.e. it's not abstract) but that it needs no arguments. + def("EmptyStatement").bases("Statement").build(); + def("BlockStatement") + .bases("Statement") + .build("body") + .field("body", [def("Statement")]); + // TODO Figure out how to silently coerce Expressions to + // ExpressionStatements where a Statement was expected. + def("ExpressionStatement") + .bases("Statement") + .build("expression") + .field("expression", def("Expression")); + def("IfStatement") + .bases("Statement") + .build("test", "consequent", "alternate") + .field("test", def("Expression")) + .field("consequent", def("Statement")) + .field("alternate", or(def("Statement"), null), defaults["null"]); + def("LabeledStatement") + .bases("Statement") + .build("label", "body") + .field("label", def("Identifier")) + .field("body", def("Statement")); + def("BreakStatement") + .bases("Statement") + .build("label") + .field("label", or(def("Identifier"), null), defaults["null"]); + def("ContinueStatement") + .bases("Statement") + .build("label") + .field("label", or(def("Identifier"), null), defaults["null"]); + def("WithStatement") + .bases("Statement") + .build("object", "body") + .field("object", def("Expression")) + .field("body", def("Statement")); + def("SwitchStatement") + .bases("Statement") + .build("discriminant", "cases", "lexical") + .field("discriminant", def("Expression")) + .field("cases", [def("SwitchCase")]) + .field("lexical", Boolean, defaults["false"]); + def("ReturnStatement") + .bases("Statement") + .build("argument") + .field("argument", or(def("Expression"), null)); + def("ThrowStatement") + .bases("Statement") + .build("argument") + .field("argument", def("Expression")); + def("TryStatement") + .bases("Statement") + .build("block", "handler", "finalizer") + .field("block", def("BlockStatement")) + .field("handler", or(def("CatchClause"), null), function () { + return this.handlers && this.handlers[0] || null; + }) + .field("handlers", [def("CatchClause")], function () { + return this.handler ? 
[this.handler] : []; + }, true) // Indicates this field is hidden from eachField iteration. + .field("guardedHandlers", [def("CatchClause")], defaults.emptyArray) + .field("finalizer", or(def("BlockStatement"), null), defaults["null"]); + def("CatchClause") + .bases("Node") + .build("param", "guard", "body") + // https://github.com/tc39/proposal-optional-catch-binding + .field("param", or(def("Pattern"), null), defaults["null"]) + .field("guard", or(def("Expression"), null), defaults["null"]) + .field("body", def("BlockStatement")); + def("WhileStatement") + .bases("Statement") + .build("test", "body") + .field("test", def("Expression")) + .field("body", def("Statement")); + def("DoWhileStatement") + .bases("Statement") + .build("body", "test") + .field("body", def("Statement")) + .field("test", def("Expression")); + def("ForStatement") + .bases("Statement") + .build("init", "test", "update", "body") + .field("init", or(def("VariableDeclaration"), def("Expression"), null)) + .field("test", or(def("Expression"), null)) + .field("update", or(def("Expression"), null)) + .field("body", def("Statement")); + def("ForInStatement") + .bases("Statement") + .build("left", "right", "body") + .field("left", or(def("VariableDeclaration"), def("Expression"))) + .field("right", def("Expression")) + .field("body", def("Statement")); + def("DebuggerStatement").bases("Statement").build(); + def("Declaration").bases("Statement"); + def("FunctionDeclaration") + .bases("Function", "Declaration") + .build("id", "params", "body") + .field("id", def("Identifier")); + def("FunctionExpression") + .bases("Function", "Expression") + .build("id", "params", "body"); + def("VariableDeclaration") + .bases("Declaration") + .build("kind", "declarations") + .field("kind", or("var", "let", "const")) + .field("declarations", [def("VariableDeclarator")]); + def("VariableDeclarator") + .bases("Node") + .build("id", "init") + .field("id", def("Pattern")) + .field("init", or(def("Expression"), null), defaults["null"]); + def("Expression").bases("Node"); + def("ThisExpression").bases("Expression").build(); + def("ArrayExpression") + .bases("Expression") + .build("elements") + .field("elements", [or(def("Expression"), null)]); + def("ObjectExpression") + .bases("Expression") + .build("properties") + .field("properties", [def("Property")]); + // TODO Not in the Mozilla Parser API, but used by Esprima. + def("Property") + .bases("Node") // Want to be able to visit Property Nodes. + .build("kind", "key", "value") + .field("kind", or("init", "get", "set")) + .field("key", or(def("Literal"), def("Identifier"))) + .field("value", def("Expression")); + def("SequenceExpression") + .bases("Expression") + .build("expressions") + .field("expressions", [def("Expression")]); + var UnaryOperator = or("-", "+", "!", "~", "typeof", "void", "delete"); + def("UnaryExpression") + .bases("Expression") + .build("operator", "argument", "prefix") + .field("operator", UnaryOperator) + .field("argument", def("Expression")) + // Esprima doesn't bother with this field, presumably because it's + // always true for unary operators. + .field("prefix", Boolean, defaults["true"]); + var BinaryOperator = or("==", "!=", "===", "!==", "<", "<=", ">", ">=", "<<", ">>", ">>>", "+", "-", "*", "/", "%", "**", "&", // TODO Missing from the Parser API. 
+ "|", "^", "in", "instanceof"); + def("BinaryExpression") + .bases("Expression") + .build("operator", "left", "right") + .field("operator", BinaryOperator) + .field("left", def("Expression")) + .field("right", def("Expression")); + var AssignmentOperator = or("=", "+=", "-=", "*=", "/=", "%=", "<<=", ">>=", ">>>=", "|=", "^=", "&="); + def("AssignmentExpression") + .bases("Expression") + .build("operator", "left", "right") + .field("operator", AssignmentOperator) + .field("left", or(def("Pattern"), def("MemberExpression"))) + .field("right", def("Expression")); + var UpdateOperator = or("++", "--"); + def("UpdateExpression") + .bases("Expression") + .build("operator", "argument", "prefix") + .field("operator", UpdateOperator) + .field("argument", def("Expression")) + .field("prefix", Boolean); + var LogicalOperator = or("||", "&&"); + def("LogicalExpression") + .bases("Expression") + .build("operator", "left", "right") + .field("operator", LogicalOperator) + .field("left", def("Expression")) + .field("right", def("Expression")); + def("ConditionalExpression") + .bases("Expression") + .build("test", "consequent", "alternate") + .field("test", def("Expression")) + .field("consequent", def("Expression")) + .field("alternate", def("Expression")); + def("NewExpression") + .bases("Expression") + .build("callee", "arguments") + .field("callee", def("Expression")) + // The Mozilla Parser API gives this type as [or(def("Expression"), + // null)], but null values don't really make sense at the call site. + // TODO Report this nonsense. + .field("arguments", [def("Expression")]); + def("CallExpression") + .bases("Expression") + .build("callee", "arguments") + .field("callee", def("Expression")) + // See comment for NewExpression above. + .field("arguments", [def("Expression")]); + def("MemberExpression") + .bases("Expression") + .build("object", "property", "computed") + .field("object", def("Expression")) + .field("property", or(def("Identifier"), def("Expression"))) + .field("computed", Boolean, function () { + var type = this.property.type; + if (type === 'Literal' || + type === 'MemberExpression' || + type === 'BinaryExpression') { + return true; + } + return false; + }); + def("Pattern").bases("Node"); + def("SwitchCase") + .bases("Node") + .build("test", "consequent") + .field("test", or(def("Expression"), null)) + .field("consequent", [def("Statement")]); + def("Identifier") + .bases("Expression", "Pattern") + .build("name") + .field("name", String) + .field("optional", Boolean, defaults["false"]); + def("Literal") + .bases("Expression") + .build("value") + .field("value", or(String, Boolean, null, Number, RegExp)) + .field("regex", or({ + pattern: String, + flags: String + }, null), function () { + if (this.value instanceof RegExp) { + var flags = ""; + if (this.value.ignoreCase) + flags += "i"; + if (this.value.multiline) + flags += "m"; + if (this.value.global) + flags += "g"; + return { + pattern: this.value.source, + flags: flags + }; + } + return null; + }); + // Abstract (non-buildable) comment supertype. Not a Node. + def("Comment") + .bases("Printable") + .field("value", String) + // A .leading comment comes before the node, whereas a .trailing + // comment comes after it. These two fields should not both be true, + // but they might both be false when the comment falls inside a node + // and the node has no children for the comment to lead or trail, + // e.g. { /*dangling*/ }. 
+ .field("leading", Boolean, defaults["true"]) + .field("trailing", Boolean, defaults["false"]); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es-proposals.js": +/*!*******************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es-proposals.js ***! + \*******************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var shared_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/shared */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js")); +var core_1 = tslib_1.__importDefault(__webpack_require__(/*! ./core */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/core.js")); +function default_1(fork) { + fork.use(core_1.default); + var types = fork.use(types_1.default); + var Type = types.Type; + var def = types.Type.def; + var or = Type.or; + var shared = fork.use(shared_1.default); + var defaults = shared.defaults; + // https://github.com/tc39/proposal-optional-chaining + // `a?.b` as per https://github.com/estree/estree/issues/146 + def("OptionalMemberExpression") + .bases("MemberExpression") + .build("object", "property", "computed", "optional") + .field("optional", Boolean, defaults["true"]); + // a?.b() + def("OptionalCallExpression") + .bases("CallExpression") + .build("callee", "arguments", "optional") + .field("optional", Boolean, defaults["true"]); + // https://github.com/tc39/proposal-nullish-coalescing + // `a ?? b` as per https://github.com/babel/babylon/pull/761/files + var LogicalOperator = or("||", "&&", "??"); + def("LogicalExpression") + .field("operator", LogicalOperator); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es2020.js": +/*!*************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es2020.js ***! + \*************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var es7_1 = tslib_1.__importDefault(__webpack_require__(/*! ./es7 */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es7.js")); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! 
../lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +function default_1(fork) { + fork.use(es7_1.default); + var types = fork.use(types_1.default); + var def = types.Type.def; + def("ImportExpression") + .bases("Expression") + .build("source") + .field("source", def("Expression")); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es6.js": +/*!**********************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es6.js ***! + \**********************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var core_1 = tslib_1.__importDefault(__webpack_require__(/*! ./core */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/core.js")); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var shared_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/shared */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js")); +function default_1(fork) { + fork.use(core_1.default); + var types = fork.use(types_1.default); + var def = types.Type.def; + var or = types.Type.or; + var defaults = fork.use(shared_1.default).defaults; + def("Function") + .field("generator", Boolean, defaults["false"]) + .field("expression", Boolean, defaults["false"]) + .field("defaults", [or(def("Expression"), null)], defaults.emptyArray) + // TODO This could be represented as a RestElement in .params. + .field("rest", or(def("Identifier"), null), defaults["null"]); + // The ESTree way of representing a ...rest parameter. + def("RestElement") + .bases("Pattern") + .build("argument") + .field("argument", def("Pattern")) + .field("typeAnnotation", // for Babylon. Flow parser puts it on the identifier + or(def("TypeAnnotation"), def("TSTypeAnnotation"), null), defaults["null"]); + def("SpreadElementPattern") + .bases("Pattern") + .build("argument") + .field("argument", def("Pattern")); + def("FunctionDeclaration") + .build("id", "params", "body", "generator", "expression"); + def("FunctionExpression") + .build("id", "params", "body", "generator", "expression"); + // The Parser API calls this ArrowExpression, but Esprima and all other + // actual parsers use ArrowFunctionExpression. + def("ArrowFunctionExpression") + .bases("Function", "Expression") + .build("params", "body", "expression") + // The forced null value here is compatible with the overridden + // definition of the "id" field in the Function interface. + .field("id", null, defaults["null"]) + // Arrow function bodies are allowed to be expressions. + .field("body", or(def("BlockStatement"), def("Expression"))) + // The current spec forbids arrow generators, so I have taken the + // liberty of enforcing that. TODO Report this. 
+ .field("generator", false, defaults["false"]); + def("ForOfStatement") + .bases("Statement") + .build("left", "right", "body") + .field("left", or(def("VariableDeclaration"), def("Pattern"))) + .field("right", def("Expression")) + .field("body", def("Statement")); + def("YieldExpression") + .bases("Expression") + .build("argument", "delegate") + .field("argument", or(def("Expression"), null)) + .field("delegate", Boolean, defaults["false"]); + def("GeneratorExpression") + .bases("Expression") + .build("body", "blocks", "filter") + .field("body", def("Expression")) + .field("blocks", [def("ComprehensionBlock")]) + .field("filter", or(def("Expression"), null)); + def("ComprehensionExpression") + .bases("Expression") + .build("body", "blocks", "filter") + .field("body", def("Expression")) + .field("blocks", [def("ComprehensionBlock")]) + .field("filter", or(def("Expression"), null)); + def("ComprehensionBlock") + .bases("Node") + .build("left", "right", "each") + .field("left", def("Pattern")) + .field("right", def("Expression")) + .field("each", Boolean); + def("Property") + .field("key", or(def("Literal"), def("Identifier"), def("Expression"))) + .field("value", or(def("Expression"), def("Pattern"))) + .field("method", Boolean, defaults["false"]) + .field("shorthand", Boolean, defaults["false"]) + .field("computed", Boolean, defaults["false"]); + def("ObjectProperty") + .field("shorthand", Boolean, defaults["false"]); + def("PropertyPattern") + .bases("Pattern") + .build("key", "pattern") + .field("key", or(def("Literal"), def("Identifier"), def("Expression"))) + .field("pattern", def("Pattern")) + .field("computed", Boolean, defaults["false"]); + def("ObjectPattern") + .bases("Pattern") + .build("properties") + .field("properties", [or(def("PropertyPattern"), def("Property"))]); + def("ArrayPattern") + .bases("Pattern") + .build("elements") + .field("elements", [or(def("Pattern"), null)]); + def("MethodDefinition") + .bases("Declaration") + .build("kind", "key", "value", "static") + .field("kind", or("constructor", "method", "get", "set")) + .field("key", def("Expression")) + .field("value", def("Function")) + .field("computed", Boolean, defaults["false"]) + .field("static", Boolean, defaults["false"]); + def("SpreadElement") + .bases("Node") + .build("argument") + .field("argument", def("Expression")); + def("ArrayExpression") + .field("elements", [or(def("Expression"), def("SpreadElement"), def("RestElement"), null)]); + def("NewExpression") + .field("arguments", [or(def("Expression"), def("SpreadElement"))]); + def("CallExpression") + .field("arguments", [or(def("Expression"), def("SpreadElement"))]); + // Note: this node type is *not* an AssignmentExpression with a Pattern on + // the left-hand side! The existing AssignmentExpression type already + // supports destructuring assignments. AssignmentPattern nodes may appear + // wherever a Pattern is allowed, and the right-hand side represents a + // default value to be destructured against the left-hand side, if no + // value is otherwise provided. For example: default parameter values. 
+ def("AssignmentPattern") + .bases("Pattern") + .build("left", "right") + .field("left", def("Pattern")) + .field("right", def("Expression")); + var ClassBodyElement = or(def("MethodDefinition"), def("VariableDeclarator"), def("ClassPropertyDefinition"), def("ClassProperty")); + def("ClassProperty") + .bases("Declaration") + .build("key") + .field("key", or(def("Literal"), def("Identifier"), def("Expression"))) + .field("computed", Boolean, defaults["false"]); + def("ClassPropertyDefinition") // static property + .bases("Declaration") + .build("definition") + // Yes, Virginia, circular definitions are permitted. + .field("definition", ClassBodyElement); + def("ClassBody") + .bases("Declaration") + .build("body") + .field("body", [ClassBodyElement]); + def("ClassDeclaration") + .bases("Declaration") + .build("id", "body", "superClass") + .field("id", or(def("Identifier"), null)) + .field("body", def("ClassBody")) + .field("superClass", or(def("Expression"), null), defaults["null"]); + def("ClassExpression") + .bases("Expression") + .build("id", "body", "superClass") + .field("id", or(def("Identifier"), null), defaults["null"]) + .field("body", def("ClassBody")) + .field("superClass", or(def("Expression"), null), defaults["null"]); + // Specifier and ModuleSpecifier are abstract non-standard types + // introduced for definitional convenience. + def("Specifier").bases("Node"); + // This supertype is shared/abused by both def/babel.js and + // def/esprima.js. In the future, it will be possible to load only one set + // of definitions appropriate for a given parser, but until then we must + // rely on default functions to reconcile the conflicting AST formats. + def("ModuleSpecifier") + .bases("Specifier") + // This local field is used by Babel/Acorn. It should not technically + // be optional in the Babel/Acorn AST format, but it must be optional + // in the Esprima AST format. + .field("local", or(def("Identifier"), null), defaults["null"]) + // The id and name fields are used by Esprima. The id field should not + // technically be optional in the Esprima AST format, but it must be + // optional in the Babel/Acorn AST format. + .field("id", or(def("Identifier"), null), defaults["null"]) + .field("name", or(def("Identifier"), null), defaults["null"]); + // Like ModuleSpecifier, except type:"ImportSpecifier" and buildable. 
+ // import {<id [as name]>} from ...; + def("ImportSpecifier") + .bases("ModuleSpecifier") + .build("id", "name"); + // import <* as id> from ...; + def("ImportNamespaceSpecifier") + .bases("ModuleSpecifier") + .build("id"); + // import <id> from ...; + def("ImportDefaultSpecifier") + .bases("ModuleSpecifier") + .build("id"); + def("ImportDeclaration") + .bases("Declaration") + .build("specifiers", "source", "importKind") + .field("specifiers", [or(def("ImportSpecifier"), def("ImportNamespaceSpecifier"), def("ImportDefaultSpecifier"))], defaults.emptyArray) + .field("source", def("Literal")) + .field("importKind", or("value", "type"), function () { + return "value"; + }); + def("TaggedTemplateExpression") + .bases("Expression") + .build("tag", "quasi") + .field("tag", def("Expression")) + .field("quasi", def("TemplateLiteral")); + def("TemplateLiteral") + .bases("Expression") + .build("quasis", "expressions") + .field("quasis", [def("TemplateElement")]) + .field("expressions", [def("Expression")]); + def("TemplateElement") + .bases("Node") + .build("value", "tail") + .field("value", { "cooked": String, "raw": String }) + .field("tail", Boolean); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es7.js": +/*!**********************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es7.js ***! + \**********************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var es6_1 = tslib_1.__importDefault(__webpack_require__(/*! ./es6 */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es6.js")); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var shared_1 = tslib_1.__importDefault(__webpack_require__(/*! 
../lib/shared */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js")); +function default_1(fork) { + fork.use(es6_1.default); + var types = fork.use(types_1.default); + var def = types.Type.def; + var or = types.Type.or; + var defaults = fork.use(shared_1.default).defaults; + def("Function") + .field("async", Boolean, defaults["false"]); + def("SpreadProperty") + .bases("Node") + .build("argument") + .field("argument", def("Expression")); + def("ObjectExpression") + .field("properties", [or(def("Property"), def("SpreadProperty"), def("SpreadElement"))]); + def("SpreadPropertyPattern") + .bases("Pattern") + .build("argument") + .field("argument", def("Pattern")); + def("ObjectPattern") + .field("properties", [or(def("Property"), def("PropertyPattern"), def("SpreadPropertyPattern"))]); + def("AwaitExpression") + .bases("Expression") + .build("argument", "all") + .field("argument", or(def("Expression"), null)) + .field("all", Boolean, defaults["false"]); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/esprima.js": +/*!**************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/esprima.js ***! + \**************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var es7_1 = tslib_1.__importDefault(__webpack_require__(/*! ./es7 */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es7.js")); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var shared_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/shared */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js")); +function default_1(fork) { + fork.use(es7_1.default); + var types = fork.use(types_1.default); + var defaults = fork.use(shared_1.default).defaults; + var def = types.Type.def; + var or = types.Type.or; + def("VariableDeclaration") + .field("declarations", [or(def("VariableDeclarator"), def("Identifier") // Esprima deviation. + )]); + def("Property") + .field("value", or(def("Expression"), def("Pattern") // Esprima deviation. + )); + def("ArrayPattern") + .field("elements", [or(def("Pattern"), def("SpreadElement"), null)]); + def("ObjectPattern") + .field("properties", [or(def("Property"), def("PropertyPattern"), def("SpreadPropertyPattern"), def("SpreadProperty") // Used by Esprima. + )]); + // Like ModuleSpecifier, except type:"ExportSpecifier" and buildable. 
+ // export {<id [as name]>} [from ...]; + def("ExportSpecifier") + .bases("ModuleSpecifier") + .build("id", "name"); + // export <*> from ...; + def("ExportBatchSpecifier") + .bases("Specifier") + .build(); + def("ExportDeclaration") + .bases("Declaration") + .build("default", "declaration", "specifiers", "source") + .field("default", Boolean) + .field("declaration", or(def("Declaration"), def("Expression"), // Implies default. + null)) + .field("specifiers", [or(def("ExportSpecifier"), def("ExportBatchSpecifier"))], defaults.emptyArray) + .field("source", or(def("Literal"), null), defaults["null"]); + def("Block") + .bases("Comment") + .build("value", /*optional:*/ "leading", "trailing"); + def("Line") + .bases("Comment") + .build("value", /*optional:*/ "leading", "trailing"); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/flow.js": +/*!***********************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/flow.js ***! + \***********************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var es7_1 = tslib_1.__importDefault(__webpack_require__(/*! ./es7 */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es7.js")); +var type_annotations_1 = tslib_1.__importDefault(__webpack_require__(/*! ./type-annotations */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/type-annotations.js")); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var shared_1 = tslib_1.__importDefault(__webpack_require__(/*! 
../lib/shared */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js")); +function default_1(fork) { + fork.use(es7_1.default); + fork.use(type_annotations_1.default); + var types = fork.use(types_1.default); + var def = types.Type.def; + var or = types.Type.or; + var defaults = fork.use(shared_1.default).defaults; + // Base types + def("Flow").bases("Node"); + def("FlowType").bases("Flow"); + // Type annotations + def("AnyTypeAnnotation") + .bases("FlowType") + .build(); + def("EmptyTypeAnnotation") + .bases("FlowType") + .build(); + def("MixedTypeAnnotation") + .bases("FlowType") + .build(); + def("VoidTypeAnnotation") + .bases("FlowType") + .build(); + def("NumberTypeAnnotation") + .bases("FlowType") + .build(); + def("NumberLiteralTypeAnnotation") + .bases("FlowType") + .build("value", "raw") + .field("value", Number) + .field("raw", String); + // Babylon 6 differs in AST from Flow + // same as NumberLiteralTypeAnnotation + def("NumericLiteralTypeAnnotation") + .bases("FlowType") + .build("value", "raw") + .field("value", Number) + .field("raw", String); + def("StringTypeAnnotation") + .bases("FlowType") + .build(); + def("StringLiteralTypeAnnotation") + .bases("FlowType") + .build("value", "raw") + .field("value", String) + .field("raw", String); + def("BooleanTypeAnnotation") + .bases("FlowType") + .build(); + def("BooleanLiteralTypeAnnotation") + .bases("FlowType") + .build("value", "raw") + .field("value", Boolean) + .field("raw", String); + def("TypeAnnotation") + .bases("Node") + .build("typeAnnotation") + .field("typeAnnotation", def("FlowType")); + def("NullableTypeAnnotation") + .bases("FlowType") + .build("typeAnnotation") + .field("typeAnnotation", def("FlowType")); + def("NullLiteralTypeAnnotation") + .bases("FlowType") + .build(); + def("NullTypeAnnotation") + .bases("FlowType") + .build(); + def("ThisTypeAnnotation") + .bases("FlowType") + .build(); + def("ExistsTypeAnnotation") + .bases("FlowType") + .build(); + def("ExistentialTypeParam") + .bases("FlowType") + .build(); + def("FunctionTypeAnnotation") + .bases("FlowType") + .build("params", "returnType", "rest", "typeParameters") + .field("params", [def("FunctionTypeParam")]) + .field("returnType", def("FlowType")) + .field("rest", or(def("FunctionTypeParam"), null)) + .field("typeParameters", or(def("TypeParameterDeclaration"), null)); + def("FunctionTypeParam") + .bases("Node") + .build("name", "typeAnnotation", "optional") + .field("name", def("Identifier")) + .field("typeAnnotation", def("FlowType")) + .field("optional", Boolean); + def("ArrayTypeAnnotation") + .bases("FlowType") + .build("elementType") + .field("elementType", def("FlowType")); + def("ObjectTypeAnnotation") + .bases("FlowType") + .build("properties", "indexers", "callProperties") + .field("properties", [ + or(def("ObjectTypeProperty"), def("ObjectTypeSpreadProperty")) + ]) + .field("indexers", [def("ObjectTypeIndexer")], defaults.emptyArray) + .field("callProperties", [def("ObjectTypeCallProperty")], defaults.emptyArray) + .field("inexact", or(Boolean, void 0), defaults["undefined"]) + .field("exact", Boolean, defaults["false"]) + .field("internalSlots", [def("ObjectTypeInternalSlot")], defaults.emptyArray); + def("Variance") + .bases("Node") + .build("kind") + .field("kind", or("plus", "minus")); + var LegacyVariance = or(def("Variance"), "plus", "minus", null); + def("ObjectTypeProperty") + .bases("Node") + .build("key", "value", "optional") + .field("key", or(def("Literal"), def("Identifier"))) 
+ .field("value", def("FlowType")) + .field("optional", Boolean) + .field("variance", LegacyVariance, defaults["null"]); + def("ObjectTypeIndexer") + .bases("Node") + .build("id", "key", "value") + .field("id", def("Identifier")) + .field("key", def("FlowType")) + .field("value", def("FlowType")) + .field("variance", LegacyVariance, defaults["null"]); + def("ObjectTypeCallProperty") + .bases("Node") + .build("value") + .field("value", def("FunctionTypeAnnotation")) + .field("static", Boolean, defaults["false"]); + def("QualifiedTypeIdentifier") + .bases("Node") + .build("qualification", "id") + .field("qualification", or(def("Identifier"), def("QualifiedTypeIdentifier"))) + .field("id", def("Identifier")); + def("GenericTypeAnnotation") + .bases("FlowType") + .build("id", "typeParameters") + .field("id", or(def("Identifier"), def("QualifiedTypeIdentifier"))) + .field("typeParameters", or(def("TypeParameterInstantiation"), null)); + def("MemberTypeAnnotation") + .bases("FlowType") + .build("object", "property") + .field("object", def("Identifier")) + .field("property", or(def("MemberTypeAnnotation"), def("GenericTypeAnnotation"))); + def("UnionTypeAnnotation") + .bases("FlowType") + .build("types") + .field("types", [def("FlowType")]); + def("IntersectionTypeAnnotation") + .bases("FlowType") + .build("types") + .field("types", [def("FlowType")]); + def("TypeofTypeAnnotation") + .bases("FlowType") + .build("argument") + .field("argument", def("FlowType")); + def("ObjectTypeSpreadProperty") + .bases("Node") + .build("argument") + .field("argument", def("FlowType")); + def("ObjectTypeInternalSlot") + .bases("Node") + .build("id", "value", "optional", "static", "method") + .field("id", def("Identifier")) + .field("value", def("FlowType")) + .field("optional", Boolean) + .field("static", Boolean) + .field("method", Boolean); + def("TypeParameterDeclaration") + .bases("Node") + .build("params") + .field("params", [def("TypeParameter")]); + def("TypeParameterInstantiation") + .bases("Node") + .build("params") + .field("params", [def("FlowType")]); + def("TypeParameter") + .bases("FlowType") + .build("name", "variance", "bound") + .field("name", String) + .field("variance", LegacyVariance, defaults["null"]) + .field("bound", or(def("TypeAnnotation"), null), defaults["null"]); + def("ClassProperty") + .field("variance", LegacyVariance, defaults["null"]); + def("ClassImplements") + .bases("Node") + .build("id") + .field("id", def("Identifier")) + .field("superClass", or(def("Expression"), null), defaults["null"]) + .field("typeParameters", or(def("TypeParameterInstantiation"), null), defaults["null"]); + def("InterfaceTypeAnnotation") + .bases("FlowType") + .build("body", "extends") + .field("body", def("ObjectTypeAnnotation")) + .field("extends", or([def("InterfaceExtends")], null), defaults["null"]); + def("InterfaceDeclaration") + .bases("Declaration") + .build("id", "body", "extends") + .field("id", def("Identifier")) + .field("typeParameters", or(def("TypeParameterDeclaration"), null), defaults["null"]) + .field("body", def("ObjectTypeAnnotation")) + .field("extends", [def("InterfaceExtends")]); + def("DeclareInterface") + .bases("InterfaceDeclaration") + .build("id", "body", "extends"); + def("InterfaceExtends") + .bases("Node") + .build("id") + .field("id", def("Identifier")) + .field("typeParameters", or(def("TypeParameterInstantiation"), null), defaults["null"]); + def("TypeAlias") + .bases("Declaration") + .build("id", "typeParameters", "right") + .field("id", def("Identifier")) + 
.field("typeParameters", or(def("TypeParameterDeclaration"), null)) + .field("right", def("FlowType")); + def("OpaqueType") + .bases("Declaration") + .build("id", "typeParameters", "impltype", "supertype") + .field("id", def("Identifier")) + .field("typeParameters", or(def("TypeParameterDeclaration"), null)) + .field("impltype", def("FlowType")) + .field("supertype", def("FlowType")); + def("DeclareTypeAlias") + .bases("TypeAlias") + .build("id", "typeParameters", "right"); + def("DeclareOpaqueType") + .bases("TypeAlias") + .build("id", "typeParameters", "supertype"); + def("TypeCastExpression") + .bases("Expression") + .build("expression", "typeAnnotation") + .field("expression", def("Expression")) + .field("typeAnnotation", def("TypeAnnotation")); + def("TupleTypeAnnotation") + .bases("FlowType") + .build("types") + .field("types", [def("FlowType")]); + def("DeclareVariable") + .bases("Statement") + .build("id") + .field("id", def("Identifier")); + def("DeclareFunction") + .bases("Statement") + .build("id") + .field("id", def("Identifier")); + def("DeclareClass") + .bases("InterfaceDeclaration") + .build("id"); + def("DeclareModule") + .bases("Statement") + .build("id", "body") + .field("id", or(def("Identifier"), def("Literal"))) + .field("body", def("BlockStatement")); + def("DeclareModuleExports") + .bases("Statement") + .build("typeAnnotation") + .field("typeAnnotation", def("TypeAnnotation")); + def("DeclareExportDeclaration") + .bases("Declaration") + .build("default", "declaration", "specifiers", "source") + .field("default", Boolean) + .field("declaration", or(def("DeclareVariable"), def("DeclareFunction"), def("DeclareClass"), def("FlowType"), // Implies default. + null)) + .field("specifiers", [or(def("ExportSpecifier"), def("ExportBatchSpecifier"))], defaults.emptyArray) + .field("source", or(def("Literal"), null), defaults["null"]); + def("DeclareExportAllDeclaration") + .bases("Declaration") + .build("source") + .field("source", or(def("Literal"), null), defaults["null"]); + def("FlowPredicate").bases("Flow"); + def("InferredPredicate") + .bases("FlowPredicate") + .build(); + def("DeclaredPredicate") + .bases("FlowPredicate") + .build("value") + .field("value", def("Expression")); + def("CallExpression") + .field("typeArguments", or(null, def("TypeParameterInstantiation")), defaults["null"]); + def("NewExpression") + .field("typeArguments", or(null, def("TypeParameterInstantiation")), defaults["null"]); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/jsx.js": +/*!**********************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/jsx.js ***! + \**********************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var es7_1 = tslib_1.__importDefault(__webpack_require__(/*! ./es7 */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es7.js")); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! 
../lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var shared_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/shared */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js")); +function default_1(fork) { + fork.use(es7_1.default); + var types = fork.use(types_1.default); + var def = types.Type.def; + var or = types.Type.or; + var defaults = fork.use(shared_1.default).defaults; + def("JSXAttribute") + .bases("Node") + .build("name", "value") + .field("name", or(def("JSXIdentifier"), def("JSXNamespacedName"))) + .field("value", or(def("Literal"), // attr="value" + def("JSXExpressionContainer"), // attr={value} + null // attr= or just attr + ), defaults["null"]); + def("JSXIdentifier") + .bases("Identifier") + .build("name") + .field("name", String); + def("JSXNamespacedName") + .bases("Node") + .build("namespace", "name") + .field("namespace", def("JSXIdentifier")) + .field("name", def("JSXIdentifier")); + def("JSXMemberExpression") + .bases("MemberExpression") + .build("object", "property") + .field("object", or(def("JSXIdentifier"), def("JSXMemberExpression"))) + .field("property", def("JSXIdentifier")) + .field("computed", Boolean, defaults.false); + var JSXElementName = or(def("JSXIdentifier"), def("JSXNamespacedName"), def("JSXMemberExpression")); + def("JSXSpreadAttribute") + .bases("Node") + .build("argument") + .field("argument", def("Expression")); + var JSXAttributes = [or(def("JSXAttribute"), def("JSXSpreadAttribute"))]; + def("JSXExpressionContainer") + .bases("Expression") + .build("expression") + .field("expression", def("Expression")); + def("JSXElement") + .bases("Expression") + .build("openingElement", "closingElement", "children") + .field("openingElement", def("JSXOpeningElement")) + .field("closingElement", or(def("JSXClosingElement"), null), defaults["null"]) + .field("children", [or(def("JSXElement"), def("JSXExpressionContainer"), def("JSXFragment"), def("JSXText"), def("Literal") // TODO Esprima should return JSXText instead. + )], defaults.emptyArray) + .field("name", JSXElementName, function () { + // Little-known fact: the `this` object inside a default function + // is none other than the partially-built object itself, and any + // fields initialized directly from builder function arguments + // (like openingElement, closingElement, and children) are + // guaranteed to be available. + return this.openingElement.name; + }, true) // hidden from traversal + .field("selfClosing", Boolean, function () { + return this.openingElement.selfClosing; + }, true) // hidden from traversal + .field("attributes", JSXAttributes, function () { + return this.openingElement.attributes; + }, true); // hidden from traversal + def("JSXOpeningElement") + .bases("Node") // TODO Does this make sense? Can't really be an JSXElement. + .build("name", "attributes", "selfClosing") + .field("name", JSXElementName) + .field("attributes", JSXAttributes, defaults.emptyArray) + .field("selfClosing", Boolean, defaults["false"]); + def("JSXClosingElement") + .bases("Node") // TODO Same concern. 
+ .build("name") + .field("name", JSXElementName); + def("JSXFragment") + .bases("Expression") + .build("openingElement", "closingElement", "children") + .field("openingElement", def("JSXOpeningFragment")) + .field("closingElement", def("JSXClosingFragment")) + .field("children", [or(def("JSXElement"), def("JSXExpressionContainer"), def("JSXFragment"), def("JSXText"), def("Literal") // TODO Esprima should return JSXText instead. + )], defaults.emptyArray); + def("JSXOpeningFragment") + .bases("Node") // TODO Same concern. + .build(); + def("JSXClosingFragment") + .bases("Node") // TODO Same concern. + .build(); + def("JSXText") + .bases("Literal") + .build("value") + .field("value", String); + def("JSXEmptyExpression").bases("Expression").build(); + // This PR has caused many people issues, but supporting it seems like a + // good idea anyway: https://github.com/babel/babel/pull/4988 + def("JSXSpreadChild") + .bases("Expression") + .build("expression") + .field("expression", def("Expression")); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/type-annotations.js": +/*!***********************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/type-annotations.js ***! + \***********************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +/** + * Type annotation defs shared between Flow and TypeScript. + * These defs could not be defined in ./flow.ts or ./typescript.ts directly + * because they use the same name. + */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var shared_1 = tslib_1.__importDefault(__webpack_require__(/*! 
../lib/shared */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js")); +function default_1(fork) { + var types = fork.use(types_1.default); + var def = types.Type.def; + var or = types.Type.or; + var defaults = fork.use(shared_1.default).defaults; + var TypeAnnotation = or(def("TypeAnnotation"), def("TSTypeAnnotation"), null); + var TypeParamDecl = or(def("TypeParameterDeclaration"), def("TSTypeParameterDeclaration"), null); + def("Identifier") + .field("typeAnnotation", TypeAnnotation, defaults["null"]); + def("ObjectPattern") + .field("typeAnnotation", TypeAnnotation, defaults["null"]); + def("Function") + .field("returnType", TypeAnnotation, defaults["null"]) + .field("typeParameters", TypeParamDecl, defaults["null"]); + def("ClassProperty") + .build("key", "value", "typeAnnotation", "static") + .field("value", or(def("Expression"), null)) + .field("static", Boolean, defaults["false"]) + .field("typeAnnotation", TypeAnnotation, defaults["null"]); + ["ClassDeclaration", + "ClassExpression", + ].forEach(function (typeName) { + def(typeName) + .field("typeParameters", TypeParamDecl, defaults["null"]) + .field("superTypeParameters", or(def("TypeParameterInstantiation"), def("TSTypeParameterInstantiation"), null), defaults["null"]) + .field("implements", or([def("ClassImplements")], [def("TSExpressionWithTypeArguments")]), defaults.emptyArray); + }); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/typescript.js": +/*!*****************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/typescript.js ***! + \*****************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var babel_core_1 = tslib_1.__importDefault(__webpack_require__(/*! ./babel-core */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/babel-core.js")); +var type_annotations_1 = tslib_1.__importDefault(__webpack_require__(/*! ./type-annotations */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/type-annotations.js")); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var shared_1 = tslib_1.__importDefault(__webpack_require__(/*! ../lib/shared */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js")); +function default_1(fork) { + // Since TypeScript is parsed by Babylon, include the core Babylon types + // but omit the Flow-related types. 
+ fork.use(babel_core_1.default); + fork.use(type_annotations_1.default); + var types = fork.use(types_1.default); + var n = types.namedTypes; + var def = types.Type.def; + var or = types.Type.or; + var defaults = fork.use(shared_1.default).defaults; + var StringLiteral = types.Type.from(function (value, deep) { + if (n.StringLiteral && + n.StringLiteral.check(value, deep)) { + return true; + } + if (n.Literal && + n.Literal.check(value, deep) && + typeof value.value === "string") { + return true; + } + return false; + }, "StringLiteral"); + def("TSType") + .bases("Node"); + var TSEntityName = or(def("Identifier"), def("TSQualifiedName")); + def("TSTypeReference") + .bases("TSType", "TSHasOptionalTypeParameterInstantiation") + .build("typeName", "typeParameters") + .field("typeName", TSEntityName); + // An abstract (non-buildable) base type that provide a commonly-needed + // optional .typeParameters field. + def("TSHasOptionalTypeParameterInstantiation") + .field("typeParameters", or(def("TSTypeParameterInstantiation"), null), defaults["null"]); + // An abstract (non-buildable) base type that provide a commonly-needed + // optional .typeParameters field. + def("TSHasOptionalTypeParameters") + .field("typeParameters", or(def("TSTypeParameterDeclaration"), null, void 0), defaults["null"]); + // An abstract (non-buildable) base type that provide a commonly-needed + // optional .typeAnnotation field. + def("TSHasOptionalTypeAnnotation") + .field("typeAnnotation", or(def("TSTypeAnnotation"), null), defaults["null"]); + def("TSQualifiedName") + .bases("Node") + .build("left", "right") + .field("left", TSEntityName) + .field("right", TSEntityName); + def("TSAsExpression") + .bases("Expression", "Pattern") + .build("expression", "typeAnnotation") + .field("expression", def("Expression")) + .field("typeAnnotation", def("TSType")) + .field("extra", or({ parenthesized: Boolean }, null), defaults["null"]); + def("TSNonNullExpression") + .bases("Expression", "Pattern") + .build("expression") + .field("expression", def("Expression")); + [ + "TSAnyKeyword", + "TSBigIntKeyword", + "TSBooleanKeyword", + "TSNeverKeyword", + "TSNullKeyword", + "TSNumberKeyword", + "TSObjectKeyword", + "TSStringKeyword", + "TSSymbolKeyword", + "TSUndefinedKeyword", + "TSUnknownKeyword", + "TSVoidKeyword", + "TSThisType", + ].forEach(function (keywordType) { + def(keywordType) + .bases("TSType") + .build(); + }); + def("TSArrayType") + .bases("TSType") + .build("elementType") + .field("elementType", def("TSType")); + def("TSLiteralType") + .bases("TSType") + .build("literal") + .field("literal", or(def("NumericLiteral"), def("StringLiteral"), def("BooleanLiteral"), def("TemplateLiteral"), def("UnaryExpression"))); + ["TSUnionType", + "TSIntersectionType", + ].forEach(function (typeName) { + def(typeName) + .bases("TSType") + .build("types") + .field("types", [def("TSType")]); + }); + def("TSConditionalType") + .bases("TSType") + .build("checkType", "extendsType", "trueType", "falseType") + .field("checkType", def("TSType")) + .field("extendsType", def("TSType")) + .field("trueType", def("TSType")) + .field("falseType", def("TSType")); + def("TSInferType") + .bases("TSType") + .build("typeParameter") + .field("typeParameter", def("TSTypeParameter")); + def("TSParenthesizedType") + .bases("TSType") + .build("typeAnnotation") + .field("typeAnnotation", def("TSType")); + var ParametersType = [or(def("Identifier"), def("RestElement"), def("ArrayPattern"), def("ObjectPattern"))]; + ["TSFunctionType", + "TSConstructorType", + 
].forEach(function (typeName) { + def(typeName) + .bases("TSType", "TSHasOptionalTypeParameters", "TSHasOptionalTypeAnnotation") + .build("parameters") + .field("parameters", ParametersType); + }); + def("TSDeclareFunction") + .bases("Declaration", "TSHasOptionalTypeParameters") + .build("id", "params", "returnType") + .field("declare", Boolean, defaults["false"]) + .field("async", Boolean, defaults["false"]) + .field("generator", Boolean, defaults["false"]) + .field("id", or(def("Identifier"), null), defaults["null"]) + .field("params", [def("Pattern")]) + // tSFunctionTypeAnnotationCommon + .field("returnType", or(def("TSTypeAnnotation"), def("Noop"), // Still used? + null), defaults["null"]); + def("TSDeclareMethod") + .bases("Declaration", "TSHasOptionalTypeParameters") + .build("key", "params", "returnType") + .field("async", Boolean, defaults["false"]) + .field("generator", Boolean, defaults["false"]) + .field("params", [def("Pattern")]) + // classMethodOrPropertyCommon + .field("abstract", Boolean, defaults["false"]) + .field("accessibility", or("public", "private", "protected", void 0), defaults["undefined"]) + .field("static", Boolean, defaults["false"]) + .field("computed", Boolean, defaults["false"]) + .field("optional", Boolean, defaults["false"]) + .field("key", or(def("Identifier"), def("StringLiteral"), def("NumericLiteral"), + // Only allowed if .computed is true. + def("Expression"))) + // classMethodOrDeclareMethodCommon + .field("kind", or("get", "set", "method", "constructor"), function getDefault() { return "method"; }) + .field("access", // Not "accessibility"? + or("public", "private", "protected", void 0), defaults["undefined"]) + .field("decorators", or([def("Decorator")], null), defaults["null"]) + // tSFunctionTypeAnnotationCommon + .field("returnType", or(def("TSTypeAnnotation"), def("Noop"), // Still used? 
+ null), defaults["null"]); + def("TSMappedType") + .bases("TSType") + .build("typeParameter", "typeAnnotation") + .field("readonly", or(Boolean, "+", "-"), defaults["false"]) + .field("typeParameter", def("TSTypeParameter")) + .field("optional", or(Boolean, "+", "-"), defaults["false"]) + .field("typeAnnotation", or(def("TSType"), null), defaults["null"]); + def("TSTupleType") + .bases("TSType") + .build("elementTypes") + .field("elementTypes", [or(def("TSType"), def("TSNamedTupleMember"))]); + def("TSNamedTupleMember") + .bases("TSType") + .build("label", "elementType", "optional") + .field("label", def("Identifier")) + .field("optional", Boolean, defaults["false"]) + .field("elementType", def("TSType")); + def("TSRestType") + .bases("TSType") + .build("typeAnnotation") + .field("typeAnnotation", def("TSType")); + def("TSOptionalType") + .bases("TSType") + .build("typeAnnotation") + .field("typeAnnotation", def("TSType")); + def("TSIndexedAccessType") + .bases("TSType") + .build("objectType", "indexType") + .field("objectType", def("TSType")) + .field("indexType", def("TSType")); + def("TSTypeOperator") + .bases("TSType") + .build("operator") + .field("operator", String) + .field("typeAnnotation", def("TSType")); + def("TSTypeAnnotation") + .bases("Node") + .build("typeAnnotation") + .field("typeAnnotation", or(def("TSType"), def("TSTypeAnnotation"))); + def("TSIndexSignature") + .bases("Declaration", "TSHasOptionalTypeAnnotation") + .build("parameters", "typeAnnotation") + .field("parameters", [def("Identifier")]) // Length === 1 + .field("readonly", Boolean, defaults["false"]); + def("TSPropertySignature") + .bases("Declaration", "TSHasOptionalTypeAnnotation") + .build("key", "typeAnnotation", "optional") + .field("key", def("Expression")) + .field("computed", Boolean, defaults["false"]) + .field("readonly", Boolean, defaults["false"]) + .field("optional", Boolean, defaults["false"]) + .field("initializer", or(def("Expression"), null), defaults["null"]); + def("TSMethodSignature") + .bases("Declaration", "TSHasOptionalTypeParameters", "TSHasOptionalTypeAnnotation") + .build("key", "parameters", "typeAnnotation") + .field("key", def("Expression")) + .field("computed", Boolean, defaults["false"]) + .field("optional", Boolean, defaults["false"]) + .field("parameters", ParametersType); + def("TSTypePredicate") + .bases("TSTypeAnnotation", "TSType") + .build("parameterName", "typeAnnotation", "asserts") + .field("parameterName", or(def("Identifier"), def("TSThisType"))) + .field("typeAnnotation", or(def("TSTypeAnnotation"), null), defaults["null"]) + .field("asserts", Boolean, defaults["false"]); + ["TSCallSignatureDeclaration", + "TSConstructSignatureDeclaration", + ].forEach(function (typeName) { + def(typeName) + .bases("Declaration", "TSHasOptionalTypeParameters", "TSHasOptionalTypeAnnotation") + .build("parameters", "typeAnnotation") + .field("parameters", ParametersType); + }); + def("TSEnumMember") + .bases("Node") + .build("id", "initializer") + .field("id", or(def("Identifier"), StringLiteral)) + .field("initializer", or(def("Expression"), null), defaults["null"]); + def("TSTypeQuery") + .bases("TSType") + .build("exprName") + .field("exprName", or(TSEntityName, def("TSImportType"))); + // Inferred from Babylon's tsParseTypeMember method. 
+ var TSTypeMember = or(def("TSCallSignatureDeclaration"), def("TSConstructSignatureDeclaration"), def("TSIndexSignature"), def("TSMethodSignature"), def("TSPropertySignature")); + def("TSTypeLiteral") + .bases("TSType") + .build("members") + .field("members", [TSTypeMember]); + def("TSTypeParameter") + .bases("Identifier") + .build("name", "constraint", "default") + .field("name", String) + .field("constraint", or(def("TSType"), void 0), defaults["undefined"]) + .field("default", or(def("TSType"), void 0), defaults["undefined"]); + def("TSTypeAssertion") + .bases("Expression", "Pattern") + .build("typeAnnotation", "expression") + .field("typeAnnotation", def("TSType")) + .field("expression", def("Expression")) + .field("extra", or({ parenthesized: Boolean }, null), defaults["null"]); + def("TSTypeParameterDeclaration") + .bases("Declaration") + .build("params") + .field("params", [def("TSTypeParameter")]); + def("TSTypeParameterInstantiation") + .bases("Node") + .build("params") + .field("params", [def("TSType")]); + def("TSEnumDeclaration") + .bases("Declaration") + .build("id", "members") + .field("id", def("Identifier")) + .field("const", Boolean, defaults["false"]) + .field("declare", Boolean, defaults["false"]) + .field("members", [def("TSEnumMember")]) + .field("initializer", or(def("Expression"), null), defaults["null"]); + def("TSTypeAliasDeclaration") + .bases("Declaration", "TSHasOptionalTypeParameters") + .build("id", "typeAnnotation") + .field("id", def("Identifier")) + .field("declare", Boolean, defaults["false"]) + .field("typeAnnotation", def("TSType")); + def("TSModuleBlock") + .bases("Node") + .build("body") + .field("body", [def("Statement")]); + def("TSModuleDeclaration") + .bases("Declaration") + .build("id", "body") + .field("id", or(StringLiteral, TSEntityName)) + .field("declare", Boolean, defaults["false"]) + .field("global", Boolean, defaults["false"]) + .field("body", or(def("TSModuleBlock"), def("TSModuleDeclaration"), null), defaults["null"]); + def("TSImportType") + .bases("TSType", "TSHasOptionalTypeParameterInstantiation") + .build("argument", "qualifier", "typeParameters") + .field("argument", StringLiteral) + .field("qualifier", or(TSEntityName, void 0), defaults["undefined"]); + def("TSImportEqualsDeclaration") + .bases("Declaration") + .build("id", "moduleReference") + .field("id", def("Identifier")) + .field("isExport", Boolean, defaults["false"]) + .field("moduleReference", or(TSEntityName, def("TSExternalModuleReference"))); + def("TSExternalModuleReference") + .bases("Declaration") + .build("expression") + .field("expression", StringLiteral); + def("TSExportAssignment") + .bases("Statement") + .build("expression") + .field("expression", def("Expression")); + def("TSNamespaceExportDeclaration") + .bases("Declaration") + .build("id") + .field("id", def("Identifier")); + def("TSInterfaceBody") + .bases("Node") + .build("body") + .field("body", [TSTypeMember]); + def("TSExpressionWithTypeArguments") + .bases("TSType", "TSHasOptionalTypeParameterInstantiation") + .build("expression", "typeParameters") + .field("expression", TSEntityName); + def("TSInterfaceDeclaration") + .bases("Declaration", "TSHasOptionalTypeParameters") + .build("id", "body") + .field("id", TSEntityName) + .field("declare", Boolean, defaults["false"]) + .field("extends", or([def("TSExpressionWithTypeArguments")], null), defaults["null"]) + .field("body", def("TSInterfaceBody")); + def("TSParameterProperty") + .bases("Pattern") + .build("parameter") + .field("accessibility", 
or("public", "private", "protected", void 0), defaults["undefined"]) + .field("readonly", Boolean, defaults["false"]) + .field("parameter", or(def("Identifier"), def("AssignmentPattern"))); + def("ClassProperty") + .field("access", // Not "accessibility"? + or("public", "private", "protected", void 0), defaults["undefined"]); + // Defined already in es6 and babel-core. + def("ClassBody") + .field("body", [or(def("MethodDefinition"), def("VariableDeclarator"), def("ClassPropertyDefinition"), def("ClassProperty"), def("ClassPrivateProperty"), def("ClassMethod"), def("ClassPrivateMethod"), + // Just need to add these types: + def("TSDeclareMethod"), TSTypeMember)]); +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/fork.js": +/*!*******************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/fork.js ***! + \*******************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ./lib/types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var path_visitor_1 = tslib_1.__importDefault(__webpack_require__(/*! ./lib/path-visitor */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/path-visitor.js")); +var equiv_1 = tslib_1.__importDefault(__webpack_require__(/*! ./lib/equiv */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/equiv.js")); +var path_1 = tslib_1.__importDefault(__webpack_require__(/*! ./lib/path */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/path.js")); +var node_path_1 = tslib_1.__importDefault(__webpack_require__(/*! 
./lib/node-path */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/node-path.js")); +function default_1(defs) { + var fork = createFork(); + var types = fork.use(types_1.default); + defs.forEach(fork.use); + types.finalize(); + var PathVisitor = fork.use(path_visitor_1.default); + return { + Type: types.Type, + builtInTypes: types.builtInTypes, + namedTypes: types.namedTypes, + builders: types.builders, + defineMethod: types.defineMethod, + getFieldNames: types.getFieldNames, + getFieldValue: types.getFieldValue, + eachField: types.eachField, + someField: types.someField, + getSupertypeNames: types.getSupertypeNames, + getBuilderName: types.getBuilderName, + astNodesAreEquivalent: fork.use(equiv_1.default), + finalize: types.finalize, + Path: fork.use(path_1.default), + NodePath: fork.use(node_path_1.default), + PathVisitor: PathVisitor, + use: fork.use, + visit: PathVisitor.visit, + }; +} +exports["default"] = default_1; +function createFork() { + var used = []; + var usedResult = []; + function use(plugin) { + var idx = used.indexOf(plugin); + if (idx === -1) { + idx = used.length; + used.push(plugin); + usedResult[idx] = plugin(fork); + } + return usedResult[idx]; + } + var fork = { use: use }; + return fork; +} +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/gen/namedTypes.js": +/*!*****************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/gen/namedTypes.js ***! + \*****************************************************************************************************************/ +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.namedTypes = void 0; +var namedTypes; +(function (namedTypes) { +})(namedTypes = exports.namedTypes || (exports.namedTypes = {})); + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/equiv.js": +/*!************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/equiv.js ***! + \************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! 
./types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +function default_1(fork) { + var types = fork.use(types_1.default); + var getFieldNames = types.getFieldNames; + var getFieldValue = types.getFieldValue; + var isArray = types.builtInTypes.array; + var isObject = types.builtInTypes.object; + var isDate = types.builtInTypes.Date; + var isRegExp = types.builtInTypes.RegExp; + var hasOwn = Object.prototype.hasOwnProperty; + function astNodesAreEquivalent(a, b, problemPath) { + if (isArray.check(problemPath)) { + problemPath.length = 0; + } + else { + problemPath = null; + } + return areEquivalent(a, b, problemPath); + } + astNodesAreEquivalent.assert = function (a, b) { + var problemPath = []; + if (!astNodesAreEquivalent(a, b, problemPath)) { + if (problemPath.length === 0) { + if (a !== b) { + throw new Error("Nodes must be equal"); + } + } + else { + throw new Error("Nodes differ in the following path: " + + problemPath.map(subscriptForProperty).join("")); + } + } + }; + function subscriptForProperty(property) { + if (/[_$a-z][_$a-z0-9]*/i.test(property)) { + return "." + property; + } + return "[" + JSON.stringify(property) + "]"; + } + function areEquivalent(a, b, problemPath) { + if (a === b) { + return true; + } + if (isArray.check(a)) { + return arraysAreEquivalent(a, b, problemPath); + } + if (isObject.check(a)) { + return objectsAreEquivalent(a, b, problemPath); + } + if (isDate.check(a)) { + return isDate.check(b) && (+a === +b); + } + if (isRegExp.check(a)) { + return isRegExp.check(b) && (a.source === b.source && + a.global === b.global && + a.multiline === b.multiline && + a.ignoreCase === b.ignoreCase); + } + return a == b; + } + function arraysAreEquivalent(a, b, problemPath) { + isArray.assert(a); + var aLength = a.length; + if (!isArray.check(b) || b.length !== aLength) { + if (problemPath) { + problemPath.push("length"); + } + return false; + } + for (var i = 0; i < aLength; ++i) { + if (problemPath) { + problemPath.push(i); + } + if (i in a !== i in b) { + return false; + } + if (!areEquivalent(a[i], b[i], problemPath)) { + return false; + } + if (problemPath) { + var problemPathTail = problemPath.pop(); + if (problemPathTail !== i) { + throw new Error("" + problemPathTail); + } + } + } + return true; + } + function objectsAreEquivalent(a, b, problemPath) { + isObject.assert(a); + if (!isObject.check(b)) { + return false; + } + // Fast path for a common property of AST nodes. + if (a.type !== b.type) { + if (problemPath) { + problemPath.push("type"); + } + return false; + } + var aNames = getFieldNames(a); + var aNameCount = aNames.length; + var bNames = getFieldNames(b); + var bNameCount = bNames.length; + if (aNameCount === bNameCount) { + for (var i = 0; i < aNameCount; ++i) { + var name = aNames[i]; + var aChild = getFieldValue(a, name); + var bChild = getFieldValue(b, name); + if (problemPath) { + problemPath.push(name); + } + if (!areEquivalent(aChild, bChild, problemPath)) { + return false; + } + if (problemPath) { + var problemPathTail = problemPath.pop(); + if (problemPathTail !== name) { + throw new Error("" + problemPathTail); + } + } + } + return true; + } + if (!problemPath) { + return false; + } + // Since aNameCount !== bNameCount, we need to find some name that's + // missing in aNames but present in bNames, or vice-versa. 
+ var seenNames = Object.create(null); + for (i = 0; i < aNameCount; ++i) { + seenNames[aNames[i]] = true; + } + for (i = 0; i < bNameCount; ++i) { + name = bNames[i]; + if (!hasOwn.call(seenNames, name)) { + problemPath.push(name); + return false; + } + delete seenNames[name]; + } + for (name in seenNames) { + problemPath.push(name); + break; + } + return false; + } + return astNodesAreEquivalent; +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/node-path.js": +/*!****************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/node-path.js ***! + \****************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ./types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var path_1 = tslib_1.__importDefault(__webpack_require__(/*! ./path */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/path.js")); +var scope_1 = tslib_1.__importDefault(__webpack_require__(/*! ./scope */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/scope.js")); +function nodePathPlugin(fork) { + var types = fork.use(types_1.default); + var n = types.namedTypes; + var b = types.builders; + var isNumber = types.builtInTypes.number; + var isArray = types.builtInTypes.array; + var Path = fork.use(path_1.default); + var Scope = fork.use(scope_1.default); + var NodePath = function NodePath(value, parentPath, name) { + if (!(this instanceof NodePath)) { + throw new Error("NodePath constructor cannot be invoked without 'new'"); + } + Path.call(this, value, parentPath, name); + }; + var NPp = NodePath.prototype = Object.create(Path.prototype, { + constructor: { + value: NodePath, + enumerable: false, + writable: true, + configurable: true + } + }); + Object.defineProperties(NPp, { + node: { + get: function () { + Object.defineProperty(this, "node", { + configurable: true, + value: this._computeNode() + }); + return this.node; + } + }, + parent: { + get: function () { + Object.defineProperty(this, "parent", { + configurable: true, + value: this._computeParent() + }); + return this.parent; + } + }, + scope: { + get: function () { + Object.defineProperty(this, "scope", { + configurable: true, + value: this._computeScope() + }); + return this.scope; + } + } + }); + NPp.replace = function () { + delete this.node; + delete this.parent; + delete this.scope; + return Path.prototype.replace.apply(this, arguments); + }; + NPp.prune = function () { + var remainingNodePath = this.parent; + this.replace(); + return cleanUpNodesAfterPrune(remainingNodePath); + }; + // The value of the first ancestor Path whose value is a Node. 
+ NPp._computeNode = function () { + var value = this.value; + if (n.Node.check(value)) { + return value; + } + var pp = this.parentPath; + return pp && pp.node || null; + }; + // The first ancestor Path whose value is a Node distinct from this.node. + NPp._computeParent = function () { + var value = this.value; + var pp = this.parentPath; + if (!n.Node.check(value)) { + while (pp && !n.Node.check(pp.value)) { + pp = pp.parentPath; + } + if (pp) { + pp = pp.parentPath; + } + } + while (pp && !n.Node.check(pp.value)) { + pp = pp.parentPath; + } + return pp || null; + }; + // The closest enclosing scope that governs this node. + NPp._computeScope = function () { + var value = this.value; + var pp = this.parentPath; + var scope = pp && pp.scope; + if (n.Node.check(value) && + Scope.isEstablishedBy(value)) { + scope = new Scope(this, scope); + } + return scope || null; + }; + NPp.getValueProperty = function (name) { + return types.getFieldValue(this.value, name); + }; + /** + * Determine whether this.node needs to be wrapped in parentheses in order + * for a parser to reproduce the same local AST structure. + * + * For instance, in the expression `(1 + 2) * 3`, the BinaryExpression + * whose operator is "+" needs parentheses, because `1 + 2 * 3` would + * parse differently. + * + * If assumeExpressionContext === true, we don't worry about edge cases + * like an anonymous FunctionExpression appearing lexically first in its + * enclosing statement and thus needing parentheses to avoid being parsed + * as a FunctionDeclaration with a missing name. + */ + NPp.needsParens = function (assumeExpressionContext) { + var pp = this.parentPath; + if (!pp) { + return false; + } + var node = this.value; + // Only expressions need parentheses. + if (!n.Expression.check(node)) { + return false; + } + // Identifiers never need parentheses. + if (node.type === "Identifier") { + return false; + } + while (!n.Node.check(pp.value)) { + pp = pp.parentPath; + if (!pp) { + return false; + } + } + var parent = pp.value; + switch (node.type) { + case "UnaryExpression": + case "SpreadElement": + case "SpreadProperty": + return parent.type === "MemberExpression" + && this.name === "object" + && parent.object === node; + case "BinaryExpression": + case "LogicalExpression": + switch (parent.type) { + case "CallExpression": + return this.name === "callee" + && parent.callee === node; + case "UnaryExpression": + case "SpreadElement": + case "SpreadProperty": + return true; + case "MemberExpression": + return this.name === "object" + && parent.object === node; + case "BinaryExpression": + case "LogicalExpression": { + var n_1 = node; + var po = parent.operator; + var pp_1 = PRECEDENCE[po]; + var no = n_1.operator; + var np = PRECEDENCE[no]; + if (pp_1 > np) { + return true; + } + if (pp_1 === np && this.name === "right") { + if (parent.right !== n_1) { + throw new Error("Nodes must be equal"); + } + return true; + } + } + default: + return false; + } + case "SequenceExpression": + switch (parent.type) { + case "ForStatement": + // Although parentheses wouldn't hurt around sequence + // expressions in the head of for loops, traditional style + // dictates that e.g. i++, j++ should not be wrapped with + // parentheses. + return false; + case "ExpressionStatement": + return this.name !== "expression"; + default: + // Otherwise err on the side of overparenthesization, adding + // explicit exceptions above if this proves overzealous. 
+ return true; + } + case "YieldExpression": + switch (parent.type) { + case "BinaryExpression": + case "LogicalExpression": + case "UnaryExpression": + case "SpreadElement": + case "SpreadProperty": + case "CallExpression": + case "MemberExpression": + case "NewExpression": + case "ConditionalExpression": + case "YieldExpression": + return true; + default: + return false; + } + case "Literal": + return parent.type === "MemberExpression" + && isNumber.check(node.value) + && this.name === "object" + && parent.object === node; + case "AssignmentExpression": + case "ConditionalExpression": + switch (parent.type) { + case "UnaryExpression": + case "SpreadElement": + case "SpreadProperty": + case "BinaryExpression": + case "LogicalExpression": + return true; + case "CallExpression": + return this.name === "callee" + && parent.callee === node; + case "ConditionalExpression": + return this.name === "test" + && parent.test === node; + case "MemberExpression": + return this.name === "object" + && parent.object === node; + default: + return false; + } + default: + if (parent.type === "NewExpression" && + this.name === "callee" && + parent.callee === node) { + return containsCallExpression(node); + } + } + if (assumeExpressionContext !== true && + !this.canBeFirstInStatement() && + this.firstInStatement()) + return true; + return false; + }; + function isBinary(node) { + return n.BinaryExpression.check(node) + || n.LogicalExpression.check(node); + } + // @ts-ignore 'isUnaryLike' is declared but its value is never read. [6133] + function isUnaryLike(node) { + return n.UnaryExpression.check(node) + // I considered making SpreadElement and SpreadProperty subtypes + // of UnaryExpression, but they're not really Expression nodes. + || (n.SpreadElement && n.SpreadElement.check(node)) + || (n.SpreadProperty && n.SpreadProperty.check(node)); + } + var PRECEDENCE = {}; + [["||"], + ["&&"], + ["|"], + ["^"], + ["&"], + ["==", "===", "!=", "!=="], + ["<", ">", "<=", ">=", "in", "instanceof"], + [">>", "<<", ">>>"], + ["+", "-"], + ["*", "/", "%"] + ].forEach(function (tier, i) { + tier.forEach(function (op) { + PRECEDENCE[op] = i; + }); + }); + function containsCallExpression(node) { + if (n.CallExpression.check(node)) { + return true; + } + if (isArray.check(node)) { + return node.some(containsCallExpression); + } + if (n.Node.check(node)) { + return types.someField(node, function (_name, child) { + return containsCallExpression(child); + }); + } + return false; + } + NPp.canBeFirstInStatement = function () { + var node = this.node; + return !n.FunctionExpression.check(node) + && !n.ObjectExpression.check(node); + }; + NPp.firstInStatement = function () { + return firstInStatement(this); + }; + function firstInStatement(path) { + for (var node, parent; path.parent; path = path.parent) { + node = path.node; + parent = path.parent.node; + if (n.BlockStatement.check(parent) && + path.parent.name === "body" && + path.name === 0) { + if (parent.body[0] !== node) { + throw new Error("Nodes must be equal"); + } + return true; + } + if (n.ExpressionStatement.check(parent) && + path.name === "expression") { + if (parent.expression !== node) { + throw new Error("Nodes must be equal"); + } + return true; + } + if (n.SequenceExpression.check(parent) && + path.parent.name === "expressions" && + path.name === 0) { + if (parent.expressions[0] !== node) { + throw new Error("Nodes must be equal"); + } + continue; + } + if (n.CallExpression.check(parent) && + path.name === "callee") { + if (parent.callee !== node) { + throw new 
Error("Nodes must be equal"); + } + continue; + } + if (n.MemberExpression.check(parent) && + path.name === "object") { + if (parent.object !== node) { + throw new Error("Nodes must be equal"); + } + continue; + } + if (n.ConditionalExpression.check(parent) && + path.name === "test") { + if (parent.test !== node) { + throw new Error("Nodes must be equal"); + } + continue; + } + if (isBinary(parent) && + path.name === "left") { + if (parent.left !== node) { + throw new Error("Nodes must be equal"); + } + continue; + } + if (n.UnaryExpression.check(parent) && + !parent.prefix && + path.name === "argument") { + if (parent.argument !== node) { + throw new Error("Nodes must be equal"); + } + continue; + } + return false; + } + return true; + } + /** + * Pruning certain nodes will result in empty or incomplete nodes, here we clean those nodes up. + */ + function cleanUpNodesAfterPrune(remainingNodePath) { + if (n.VariableDeclaration.check(remainingNodePath.node)) { + var declarations = remainingNodePath.get('declarations').value; + if (!declarations || declarations.length === 0) { + return remainingNodePath.prune(); + } + } + else if (n.ExpressionStatement.check(remainingNodePath.node)) { + if (!remainingNodePath.get('expression').value) { + return remainingNodePath.prune(); + } + } + else if (n.IfStatement.check(remainingNodePath.node)) { + cleanUpIfStatementAfterPrune(remainingNodePath); + } + return remainingNodePath; + } + function cleanUpIfStatementAfterPrune(ifStatement) { + var testExpression = ifStatement.get('test').value; + var alternate = ifStatement.get('alternate').value; + var consequent = ifStatement.get('consequent').value; + if (!consequent && !alternate) { + var testExpressionStatement = b.expressionStatement(testExpression); + ifStatement.replace(testExpressionStatement); + } + else if (!consequent && alternate) { + var negatedTestExpression = b.unaryExpression('!', testExpression, true); + if (n.UnaryExpression.check(testExpression) && testExpression.operator === '!') { + negatedTestExpression = testExpression.argument; + } + ifStatement.get("test").replace(negatedTestExpression); + ifStatement.get("consequent").replace(alternate); + ifStatement.get("alternate").replace(); + } + } + return NodePath; +} +exports["default"] = nodePathPlugin; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/path-visitor.js": +/*!*******************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/path-visitor.js ***! + \*******************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ./types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var node_path_1 = tslib_1.__importDefault(__webpack_require__(/*! 
./node-path */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/node-path.js")); +var hasOwn = Object.prototype.hasOwnProperty; +function pathVisitorPlugin(fork) { + var types = fork.use(types_1.default); + var NodePath = fork.use(node_path_1.default); + var isArray = types.builtInTypes.array; + var isObject = types.builtInTypes.object; + var isFunction = types.builtInTypes.function; + var undefined; + var PathVisitor = function PathVisitor() { + if (!(this instanceof PathVisitor)) { + throw new Error("PathVisitor constructor cannot be invoked without 'new'"); + } + // Permanent state. + this._reusableContextStack = []; + this._methodNameTable = computeMethodNameTable(this); + this._shouldVisitComments = + hasOwn.call(this._methodNameTable, "Block") || + hasOwn.call(this._methodNameTable, "Line"); + this.Context = makeContextConstructor(this); + // State reset every time PathVisitor.prototype.visit is called. + this._visiting = false; + this._changeReported = false; + }; + function computeMethodNameTable(visitor) { + var typeNames = Object.create(null); + for (var methodName in visitor) { + if (/^visit[A-Z]/.test(methodName)) { + typeNames[methodName.slice("visit".length)] = true; + } + } + var supertypeTable = types.computeSupertypeLookupTable(typeNames); + var methodNameTable = Object.create(null); + var typeNameKeys = Object.keys(supertypeTable); + var typeNameCount = typeNameKeys.length; + for (var i = 0; i < typeNameCount; ++i) { + var typeName = typeNameKeys[i]; + methodName = "visit" + supertypeTable[typeName]; + if (isFunction.check(visitor[methodName])) { + methodNameTable[typeName] = methodName; + } + } + return methodNameTable; + } + PathVisitor.fromMethodsObject = function fromMethodsObject(methods) { + if (methods instanceof PathVisitor) { + return methods; + } + if (!isObject.check(methods)) { + // An empty visitor? + return new PathVisitor; + } + var Visitor = function Visitor() { + if (!(this instanceof Visitor)) { + throw new Error("Visitor constructor cannot be invoked without 'new'"); + } + PathVisitor.call(this); + }; + var Vp = Visitor.prototype = Object.create(PVp); + Vp.constructor = Visitor; + extend(Vp, methods); + extend(Visitor, PathVisitor); + isFunction.assert(Visitor.fromMethodsObject); + isFunction.assert(Visitor.visit); + return new Visitor; + }; + function extend(target, source) { + for (var property in source) { + if (hasOwn.call(source, property)) { + target[property] = source[property]; + } + } + return target; + } + PathVisitor.visit = function visit(node, methods) { + return PathVisitor.fromMethodsObject(methods).visit(node); + }; + var PVp = PathVisitor.prototype; + PVp.visit = function () { + if (this._visiting) { + throw new Error("Recursively calling visitor.visit(path) resets visitor state. " + + "Try this.visit(path) or this.traverse(path) instead."); + } + // Private state that needs to be reset before every traversal. + this._visiting = true; + this._changeReported = false; + this._abortRequested = false; + var argc = arguments.length; + var args = new Array(argc); + for (var i = 0; i < argc; ++i) { + args[i] = arguments[i]; + } + if (!(args[0] instanceof NodePath)) { + args[0] = new NodePath({ root: args[0] }).get("root"); + } + // Called with the same arguments as .visit. 
+ this.reset.apply(this, args); + var didNotThrow; + try { + var root = this.visitWithoutReset(args[0]); + didNotThrow = true; + } + finally { + this._visiting = false; + if (!didNotThrow && this._abortRequested) { + // If this.visitWithoutReset threw an exception and + // this._abortRequested was set to true, return the root of + // the AST instead of letting the exception propagate, so that + // client code does not have to provide a try-catch block to + // intercept the AbortRequest exception. Other kinds of + // exceptions will propagate without being intercepted and + // rethrown by a catch block, so their stacks will accurately + // reflect the original throwing context. + return args[0].value; + } + } + return root; + }; + PVp.AbortRequest = function AbortRequest() { }; + PVp.abort = function () { + var visitor = this; + visitor._abortRequested = true; + var request = new visitor.AbortRequest(); + // If you decide to catch this exception and stop it from propagating, + // make sure to call its cancel method to avoid silencing other + // exceptions that might be thrown later in the traversal. + request.cancel = function () { + visitor._abortRequested = false; + }; + throw request; + }; + PVp.reset = function (_path /*, additional arguments */) { + // Empty stub; may be reassigned or overridden by subclasses. + }; + PVp.visitWithoutReset = function (path) { + if (this instanceof this.Context) { + // Since this.Context.prototype === this, there's a chance we + // might accidentally call context.visitWithoutReset. If that + // happens, re-invoke the method against context.visitor. + return this.visitor.visitWithoutReset(path); + } + if (!(path instanceof NodePath)) { + throw new Error(""); + } + var value = path.value; + var methodName = value && + typeof value === "object" && + typeof value.type === "string" && + this._methodNameTable[value.type]; + if (methodName) { + var context = this.acquireContext(path); + try { + return context.invokeVisitorMethod(methodName); + } + finally { + this.releaseContext(context); + } + } + else { + // If there was no visitor method to call, visit the children of + // this node generically. + return visitChildren(path, this); + } + }; + function visitChildren(path, visitor) { + if (!(path instanceof NodePath)) { + throw new Error(""); + } + if (!(visitor instanceof PathVisitor)) { + throw new Error(""); + } + var value = path.value; + if (isArray.check(value)) { + path.each(visitor.visitWithoutReset, visitor); + } + else if (!isObject.check(value)) { + // No children to visit. + } + else { + var childNames = types.getFieldNames(value); + // The .comments field of the Node type is hidden, so we only + // visit it if the visitor defines visitBlock or visitLine, and + // value.comments is defined. 
+ if (visitor._shouldVisitComments && + value.comments && + childNames.indexOf("comments") < 0) { + childNames.push("comments"); + } + var childCount = childNames.length; + var childPaths = []; + for (var i = 0; i < childCount; ++i) { + var childName = childNames[i]; + if (!hasOwn.call(value, childName)) { + value[childName] = types.getFieldValue(value, childName); + } + childPaths.push(path.get(childName)); + } + for (var i = 0; i < childCount; ++i) { + visitor.visitWithoutReset(childPaths[i]); + } + } + return path.value; + } + PVp.acquireContext = function (path) { + if (this._reusableContextStack.length === 0) { + return new this.Context(path); + } + return this._reusableContextStack.pop().reset(path); + }; + PVp.releaseContext = function (context) { + if (!(context instanceof this.Context)) { + throw new Error(""); + } + this._reusableContextStack.push(context); + context.currentPath = null; + }; + PVp.reportChanged = function () { + this._changeReported = true; + }; + PVp.wasChangeReported = function () { + return this._changeReported; + }; + function makeContextConstructor(visitor) { + function Context(path) { + if (!(this instanceof Context)) { + throw new Error(""); + } + if (!(this instanceof PathVisitor)) { + throw new Error(""); + } + if (!(path instanceof NodePath)) { + throw new Error(""); + } + Object.defineProperty(this, "visitor", { + value: visitor, + writable: false, + enumerable: true, + configurable: false + }); + this.currentPath = path; + this.needToCallTraverse = true; + Object.seal(this); + } + if (!(visitor instanceof PathVisitor)) { + throw new Error(""); + } + // Note that the visitor object is the prototype of Context.prototype, + // so all visitor methods are inherited by context objects. + var Cp = Context.prototype = Object.create(visitor); + Cp.constructor = Context; + extend(Cp, sharedContextProtoMethods); + return Context; + } + // Every PathVisitor has a different this.Context constructor and + // this.Context.prototype object, but those prototypes can all use the + // same reset, invokeVisitorMethod, and traverse function objects. + var sharedContextProtoMethods = Object.create(null); + sharedContextProtoMethods.reset = + function reset(path) { + if (!(this instanceof this.Context)) { + throw new Error(""); + } + if (!(path instanceof NodePath)) { + throw new Error(""); + } + this.currentPath = path; + this.needToCallTraverse = true; + return this; + }; + sharedContextProtoMethods.invokeVisitorMethod = + function invokeVisitorMethod(methodName) { + if (!(this instanceof this.Context)) { + throw new Error(""); + } + if (!(this.currentPath instanceof NodePath)) { + throw new Error(""); + } + var result = this.visitor[methodName].call(this, this.currentPath); + if (result === false) { + // Visitor methods return false to indicate that they have handled + // their own traversal needs, and we should not complain if + // this.needToCallTraverse is still true. + this.needToCallTraverse = false; + } + else if (result !== undefined) { + // Any other non-undefined value returned from the visitor method + // is interpreted as a replacement value. + this.currentPath = this.currentPath.replace(result)[0]; + if (this.needToCallTraverse) { + // If this.traverse still hasn't been called, visit the + // children of the replacement node. 
+ this.traverse(this.currentPath); + } + } + if (this.needToCallTraverse !== false) { + throw new Error("Must either call this.traverse or return false in " + methodName); + } + var path = this.currentPath; + return path && path.value; + }; + sharedContextProtoMethods.traverse = + function traverse(path, newVisitor) { + if (!(this instanceof this.Context)) { + throw new Error(""); + } + if (!(path instanceof NodePath)) { + throw new Error(""); + } + if (!(this.currentPath instanceof NodePath)) { + throw new Error(""); + } + this.needToCallTraverse = false; + return visitChildren(path, PathVisitor.fromMethodsObject(newVisitor || this.visitor)); + }; + sharedContextProtoMethods.visit = + function visit(path, newVisitor) { + if (!(this instanceof this.Context)) { + throw new Error(""); + } + if (!(path instanceof NodePath)) { + throw new Error(""); + } + if (!(this.currentPath instanceof NodePath)) { + throw new Error(""); + } + this.needToCallTraverse = false; + return PathVisitor.fromMethodsObject(newVisitor || this.visitor).visitWithoutReset(path); + }; + sharedContextProtoMethods.reportChanged = function reportChanged() { + this.visitor.reportChanged(); + }; + sharedContextProtoMethods.abort = function abort() { + this.needToCallTraverse = false; + this.visitor.abort(); + }; + return PathVisitor; +} +exports["default"] = pathVisitorPlugin; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/path.js": +/*!***********************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/path.js ***! + \***********************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ./types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var Op = Object.prototype; +var hasOwn = Op.hasOwnProperty; +function pathPlugin(fork) { + var types = fork.use(types_1.default); + var isArray = types.builtInTypes.array; + var isNumber = types.builtInTypes.number; + var Path = function Path(value, parentPath, name) { + if (!(this instanceof Path)) { + throw new Error("Path constructor cannot be invoked without 'new'"); + } + if (parentPath) { + if (!(parentPath instanceof Path)) { + throw new Error(""); + } + } + else { + parentPath = null; + name = null; + } + // The value encapsulated by this Path, generally equal to + // parentPath.value[name] if we have a parentPath. + this.value = value; + // The immediate parent Path of this Path. + this.parentPath = parentPath; + // The name of the property of parentPath.value through which this + // Path's value was reached. + this.name = name; + // Calling path.get("child") multiple times always returns the same + // child Path object, for both performance and consistency reasons. + this.__childCache = null; + }; + var Pp = Path.prototype; + function getChildCache(path) { + // Lazily create the child cache. This also cheapens cache + // invalidation, since you can just reset path.__childCache to null. 
+ return path.__childCache || (path.__childCache = Object.create(null)); + } + function getChildPath(path, name) { + var cache = getChildCache(path); + var actualChildValue = path.getValueProperty(name); + var childPath = cache[name]; + if (!hasOwn.call(cache, name) || + // Ensure consistency between cache and reality. + childPath.value !== actualChildValue) { + childPath = cache[name] = new path.constructor(actualChildValue, path, name); + } + return childPath; + } + // This method is designed to be overridden by subclasses that need to + // handle missing properties, etc. + Pp.getValueProperty = function getValueProperty(name) { + return this.value[name]; + }; + Pp.get = function get() { + var names = []; + for (var _i = 0; _i < arguments.length; _i++) { + names[_i] = arguments[_i]; + } + var path = this; + var count = names.length; + for (var i = 0; i < count; ++i) { + path = getChildPath(path, names[i]); + } + return path; + }; + Pp.each = function each(callback, context) { + var childPaths = []; + var len = this.value.length; + var i = 0; + // Collect all the original child paths before invoking the callback. + for (var i = 0; i < len; ++i) { + if (hasOwn.call(this.value, i)) { + childPaths[i] = this.get(i); + } + } + // Invoke the callback on just the original child paths, regardless of + // any modifications made to the array by the callback. I chose these + // semantics over cleverly invoking the callback on new elements because + // this way is much easier to reason about. + context = context || this; + for (i = 0; i < len; ++i) { + if (hasOwn.call(childPaths, i)) { + callback.call(context, childPaths[i]); + } + } + }; + Pp.map = function map(callback, context) { + var result = []; + this.each(function (childPath) { + result.push(callback.call(this, childPath)); + }, context); + return result; + }; + Pp.filter = function filter(callback, context) { + var result = []; + this.each(function (childPath) { + if (callback.call(this, childPath)) { + result.push(childPath); + } + }, context); + return result; + }; + function emptyMoves() { } + function getMoves(path, offset, start, end) { + isArray.assert(path.value); + if (offset === 0) { + return emptyMoves; + } + var length = path.value.length; + if (length < 1) { + return emptyMoves; + } + var argc = arguments.length; + if (argc === 2) { + start = 0; + end = length; + } + else if (argc === 3) { + start = Math.max(start, 0); + end = length; + } + else { + start = Math.max(start, 0); + end = Math.min(end, length); + } + isNumber.assert(start); + isNumber.assert(end); + var moves = Object.create(null); + var cache = getChildCache(path); + for (var i = start; i < end; ++i) { + if (hasOwn.call(path.value, i)) { + var childPath = path.get(i); + if (childPath.name !== i) { + throw new Error(""); + } + var newIndex = i + offset; + childPath.name = newIndex; + moves[newIndex] = childPath; + delete cache[i]; + } + } + delete cache.length; + return function () { + for (var newIndex in moves) { + var childPath = moves[newIndex]; + if (childPath.name !== +newIndex) { + throw new Error(""); + } + cache[newIndex] = childPath; + path.value[newIndex] = childPath.value; + } + }; + } + Pp.shift = function shift() { + var move = getMoves(this, -1); + var result = this.value.shift(); + move(); + return result; + }; + Pp.unshift = function unshift() { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + var move = getMoves(this, args.length); + var result = this.value.unshift.apply(this.value, args); + 
move(); + return result; + }; + Pp.push = function push() { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + isArray.assert(this.value); + delete getChildCache(this).length; + return this.value.push.apply(this.value, args); + }; + Pp.pop = function pop() { + isArray.assert(this.value); + var cache = getChildCache(this); + delete cache[this.value.length - 1]; + delete cache.length; + return this.value.pop(); + }; + Pp.insertAt = function insertAt(index) { + var argc = arguments.length; + var move = getMoves(this, argc - 1, index); + if (move === emptyMoves && argc <= 1) { + return this; + } + index = Math.max(index, 0); + for (var i = 1; i < argc; ++i) { + this.value[index + i - 1] = arguments[i]; + } + move(); + return this; + }; + Pp.insertBefore = function insertBefore() { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + var pp = this.parentPath; + var argc = args.length; + var insertAtArgs = [this.name]; + for (var i = 0; i < argc; ++i) { + insertAtArgs.push(args[i]); + } + return pp.insertAt.apply(pp, insertAtArgs); + }; + Pp.insertAfter = function insertAfter() { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + var pp = this.parentPath; + var argc = args.length; + var insertAtArgs = [this.name + 1]; + for (var i = 0; i < argc; ++i) { + insertAtArgs.push(args[i]); + } + return pp.insertAt.apply(pp, insertAtArgs); + }; + function repairRelationshipWithParent(path) { + if (!(path instanceof Path)) { + throw new Error(""); + } + var pp = path.parentPath; + if (!pp) { + // Orphan paths have no relationship to repair. + return path; + } + var parentValue = pp.value; + var parentCache = getChildCache(pp); + // Make sure parentCache[path.name] is populated. + if (parentValue[path.name] === path.value) { + parentCache[path.name] = path; + } + else if (isArray.check(parentValue)) { + // Something caused path.name to become out of date, so attempt to + // recover by searching for path.value in parentValue. + var i = parentValue.indexOf(path.value); + if (i >= 0) { + parentCache[path.name = i] = path; + } + } + else { + // If path.value disagrees with parentValue[path.name], and + // path.name is not an array index, let path.value become the new + // parentValue[path.name] and update parentCache accordingly. 
+ parentValue[path.name] = path.value; + parentCache[path.name] = path; + } + if (parentValue[path.name] !== path.value) { + throw new Error(""); + } + if (path.parentPath.get(path.name) !== path) { + throw new Error(""); + } + return path; + } + Pp.replace = function replace(replacement) { + var results = []; + var parentValue = this.parentPath.value; + var parentCache = getChildCache(this.parentPath); + var count = arguments.length; + repairRelationshipWithParent(this); + if (isArray.check(parentValue)) { + var originalLength = parentValue.length; + var move = getMoves(this.parentPath, count - 1, this.name + 1); + var spliceArgs = [this.name, 1]; + for (var i = 0; i < count; ++i) { + spliceArgs.push(arguments[i]); + } + var splicedOut = parentValue.splice.apply(parentValue, spliceArgs); + if (splicedOut[0] !== this.value) { + throw new Error(""); + } + if (parentValue.length !== (originalLength - 1 + count)) { + throw new Error(""); + } + move(); + if (count === 0) { + delete this.value; + delete parentCache[this.name]; + this.__childCache = null; + } + else { + if (parentValue[this.name] !== replacement) { + throw new Error(""); + } + if (this.value !== replacement) { + this.value = replacement; + this.__childCache = null; + } + for (i = 0; i < count; ++i) { + results.push(this.parentPath.get(this.name + i)); + } + if (results[0] !== this) { + throw new Error(""); + } + } + } + else if (count === 1) { + if (this.value !== replacement) { + this.__childCache = null; + } + this.value = parentValue[this.name] = replacement; + results.push(this); + } + else if (count === 0) { + delete parentValue[this.name]; + delete this.value; + this.__childCache = null; + // Leave this path cached as parentCache[this.name], even though + // it no longer has a value defined. + } + else { + throw new Error("Could not replace path"); + } + return results; + }; + return Path; +} +exports["default"] = pathPlugin; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/scope.js": +/*!************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/scope.js ***! + \************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! 
./types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +var hasOwn = Object.prototype.hasOwnProperty; +function scopePlugin(fork) { + var types = fork.use(types_1.default); + var Type = types.Type; + var namedTypes = types.namedTypes; + var Node = namedTypes.Node; + var Expression = namedTypes.Expression; + var isArray = types.builtInTypes.array; + var b = types.builders; + var Scope = function Scope(path, parentScope) { + if (!(this instanceof Scope)) { + throw new Error("Scope constructor cannot be invoked without 'new'"); + } + ScopeType.assert(path.value); + var depth; + if (parentScope) { + if (!(parentScope instanceof Scope)) { + throw new Error(""); + } + depth = parentScope.depth + 1; + } + else { + parentScope = null; + depth = 0; + } + Object.defineProperties(this, { + path: { value: path }, + node: { value: path.value }, + isGlobal: { value: !parentScope, enumerable: true }, + depth: { value: depth }, + parent: { value: parentScope }, + bindings: { value: {} }, + types: { value: {} }, + }); + }; + var scopeTypes = [ + // Program nodes introduce global scopes. + namedTypes.Program, + // Function is the supertype of FunctionExpression, + // FunctionDeclaration, ArrowExpression, etc. + namedTypes.Function, + // In case you didn't know, the caught parameter shadows any variable + // of the same name in an outer scope. + namedTypes.CatchClause + ]; + var ScopeType = Type.or.apply(Type, scopeTypes); + Scope.isEstablishedBy = function (node) { + return ScopeType.check(node); + }; + var Sp = Scope.prototype; + // Will be overridden after an instance lazily calls scanScope. + Sp.didScan = false; + Sp.declares = function (name) { + this.scan(); + return hasOwn.call(this.bindings, name); + }; + Sp.declaresType = function (name) { + this.scan(); + return hasOwn.call(this.types, name); + }; + Sp.declareTemporary = function (prefix) { + if (prefix) { + if (!/^[a-z$_]/i.test(prefix)) { + throw new Error(""); + } + } + else { + prefix = "t$"; + } + // Include this.depth in the name to make sure the name does not + // collide with any variables in nested/enclosing scopes. + prefix += this.depth.toString(36) + "$"; + this.scan(); + var index = 0; + while (this.declares(prefix + index)) { + ++index; + } + var name = prefix + index; + return this.bindings[name] = types.builders.identifier(name); + }; + Sp.injectTemporary = function (identifier, init) { + identifier || (identifier = this.declareTemporary()); + var bodyPath = this.path.get("body"); + if (namedTypes.BlockStatement.check(bodyPath.value)) { + bodyPath = bodyPath.get("body"); + } + bodyPath.unshift(b.variableDeclaration("var", [b.variableDeclarator(identifier, init || null)])); + return identifier; + }; + Sp.scan = function (force) { + if (force || !this.didScan) { + for (var name in this.bindings) { + // Empty out this.bindings, just in cases. + delete this.bindings[name]; + } + scanScope(this.path, this.bindings, this.types); + this.didScan = true; + } + }; + Sp.getBindings = function () { + this.scan(); + return this.bindings; + }; + Sp.getTypes = function () { + this.scan(); + return this.types; + }; + function scanScope(path, bindings, scopeTypes) { + var node = path.value; + ScopeType.assert(node); + if (namedTypes.CatchClause.check(node)) { + // A catch clause establishes a new scope but the only variable + // bound in that scope is the catch parameter. Any other + // declarations create bindings in the outer scope. 
+ var param = path.get("param"); + if (param.value) { + addPattern(param, bindings); + } + } + else { + recursiveScanScope(path, bindings, scopeTypes); + } + } + function recursiveScanScope(path, bindings, scopeTypes) { + var node = path.value; + if (path.parent && + namedTypes.FunctionExpression.check(path.parent.node) && + path.parent.node.id) { + addPattern(path.parent.get("id"), bindings); + } + if (!node) { + // None of the remaining cases matter if node is falsy. + } + else if (isArray.check(node)) { + path.each(function (childPath) { + recursiveScanChild(childPath, bindings, scopeTypes); + }); + } + else if (namedTypes.Function.check(node)) { + path.get("params").each(function (paramPath) { + addPattern(paramPath, bindings); + }); + recursiveScanChild(path.get("body"), bindings, scopeTypes); + } + else if ((namedTypes.TypeAlias && namedTypes.TypeAlias.check(node)) || + (namedTypes.InterfaceDeclaration && namedTypes.InterfaceDeclaration.check(node)) || + (namedTypes.TSTypeAliasDeclaration && namedTypes.TSTypeAliasDeclaration.check(node)) || + (namedTypes.TSInterfaceDeclaration && namedTypes.TSInterfaceDeclaration.check(node))) { + addTypePattern(path.get("id"), scopeTypes); + } + else if (namedTypes.VariableDeclarator.check(node)) { + addPattern(path.get("id"), bindings); + recursiveScanChild(path.get("init"), bindings, scopeTypes); + } + else if (node.type === "ImportSpecifier" || + node.type === "ImportNamespaceSpecifier" || + node.type === "ImportDefaultSpecifier") { + addPattern( + // Esprima used to use the .name field to refer to the local + // binding identifier for ImportSpecifier nodes, but .id for + // ImportNamespaceSpecifier and ImportDefaultSpecifier nodes. + // ESTree/Acorn/ESpree use .local for all three node types. + path.get(node.local ? "local" : + node.name ? "name" : "id"), bindings); + } + else if (Node.check(node) && !Expression.check(node)) { + types.eachField(node, function (name, child) { + var childPath = path.get(name); + if (!pathHasValue(childPath, child)) { + throw new Error(""); + } + recursiveScanChild(childPath, bindings, scopeTypes); + }); + } + } + function pathHasValue(path, value) { + if (path.value === value) { + return true; + } + // Empty arrays are probably produced by defaults.emptyArray, in which + // case is makes sense to regard them as equivalent, if not ===. + if (Array.isArray(path.value) && + path.value.length === 0 && + Array.isArray(value) && + value.length === 0) { + return true; + } + return false; + } + function recursiveScanChild(path, bindings, scopeTypes) { + var node = path.value; + if (!node || Expression.check(node)) { + // Ignore falsy values and Expressions. + } + else if (namedTypes.FunctionDeclaration.check(node) && + node.id !== null) { + addPattern(path.get("id"), bindings); + } + else if (namedTypes.ClassDeclaration && + namedTypes.ClassDeclaration.check(node)) { + addPattern(path.get("id"), bindings); + } + else if (ScopeType.check(node)) { + if (namedTypes.CatchClause.check(node) && + // TODO Broaden this to accept any pattern. + namedTypes.Identifier.check(node.param)) { + var catchParamName = node.param.name; + var hadBinding = hasOwn.call(bindings, catchParamName); + // Any declarations that occur inside the catch body that do + // not have the same name as the catch parameter should count + // as bindings in the outer scope. 
+ recursiveScanScope(path.get("body"), bindings, scopeTypes); + // If a new binding matching the catch parameter name was + // created while scanning the catch body, ignore it because it + // actually refers to the catch parameter and not the outer + // scope that we're currently scanning. + if (!hadBinding) { + delete bindings[catchParamName]; + } + } + } + else { + recursiveScanScope(path, bindings, scopeTypes); + } + } + function addPattern(patternPath, bindings) { + var pattern = patternPath.value; + namedTypes.Pattern.assert(pattern); + if (namedTypes.Identifier.check(pattern)) { + if (hasOwn.call(bindings, pattern.name)) { + bindings[pattern.name].push(patternPath); + } + else { + bindings[pattern.name] = [patternPath]; + } + } + else if (namedTypes.AssignmentPattern && + namedTypes.AssignmentPattern.check(pattern)) { + addPattern(patternPath.get('left'), bindings); + } + else if (namedTypes.ObjectPattern && + namedTypes.ObjectPattern.check(pattern)) { + patternPath.get('properties').each(function (propertyPath) { + var property = propertyPath.value; + if (namedTypes.Pattern.check(property)) { + addPattern(propertyPath, bindings); + } + else if (namedTypes.Property.check(property)) { + addPattern(propertyPath.get('value'), bindings); + } + else if (namedTypes.SpreadProperty && + namedTypes.SpreadProperty.check(property)) { + addPattern(propertyPath.get('argument'), bindings); + } + }); + } + else if (namedTypes.ArrayPattern && + namedTypes.ArrayPattern.check(pattern)) { + patternPath.get('elements').each(function (elementPath) { + var element = elementPath.value; + if (namedTypes.Pattern.check(element)) { + addPattern(elementPath, bindings); + } + else if (namedTypes.SpreadElement && + namedTypes.SpreadElement.check(element)) { + addPattern(elementPath.get("argument"), bindings); + } + }); + } + else if (namedTypes.PropertyPattern && + namedTypes.PropertyPattern.check(pattern)) { + addPattern(patternPath.get('pattern'), bindings); + } + else if ((namedTypes.SpreadElementPattern && + namedTypes.SpreadElementPattern.check(pattern)) || + (namedTypes.SpreadPropertyPattern && + namedTypes.SpreadPropertyPattern.check(pattern))) { + addPattern(patternPath.get('argument'), bindings); + } + } + function addTypePattern(patternPath, types) { + var pattern = patternPath.value; + namedTypes.Pattern.assert(pattern); + if (namedTypes.Identifier.check(pattern)) { + if (hasOwn.call(types, pattern.name)) { + types[pattern.name].push(patternPath); + } + else { + types[pattern.name] = [patternPath]; + } + } + } + Sp.lookup = function (name) { + for (var scope = this; scope; scope = scope.parent) + if (scope.declares(name)) + break; + return scope; + }; + Sp.lookupType = function (name) { + for (var scope = this; scope; scope = scope.parent) + if (scope.declaresType(name)) + break; + return scope; + }; + Sp.getGlobalScope = function () { + var scope = this; + while (!scope.isGlobal) + scope = scope.parent; + return scope; + }; + return Scope; +} +exports["default"] = scopePlugin; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js": +/*!*************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/shared.js ***! 
+ \*************************************************************************************************************/ +/***/ ((module, exports, __webpack_require__) => { + +"use strict"; +; +Object.defineProperty(exports, "__esModule", ({ value: true })); +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var types_1 = tslib_1.__importDefault(__webpack_require__(/*! ./types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js")); +function default_1(fork) { + var types = fork.use(types_1.default); + var Type = types.Type; + var builtin = types.builtInTypes; + var isNumber = builtin.number; + // An example of constructing a new type with arbitrary constraints from + // an existing type. + function geq(than) { + return Type.from(function (value) { return isNumber.check(value) && value >= than; }, isNumber + " >= " + than); + } + ; + // Default value-returning functions that may optionally be passed as a + // third argument to Def.prototype.field. + var defaults = { + // Functions were used because (among other reasons) that's the most + // elegant way to allow for the emptyArray one always to give a new + // array instance. + "null": function () { return null; }, + "emptyArray": function () { return []; }, + "false": function () { return false; }, + "true": function () { return true; }, + "undefined": function () { }, + "use strict": function () { return "use strict"; } + }; + var naiveIsPrimitive = Type.or(builtin.string, builtin.number, builtin.boolean, builtin.null, builtin.undefined); + var isPrimitive = Type.from(function (value) { + if (value === null) + return true; + var type = typeof value; + if (type === "object" || + type === "function") { + return false; + } + return true; + }, naiveIsPrimitive.toString()); + return { + geq: geq, + defaults: defaults, + isPrimitive: isPrimitive, + }; +} +exports["default"] = default_1; +module.exports = exports["default"]; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js": +/*!************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/lib/types.js ***! + \************************************************************************************************************/ +/***/ ((__unused_webpack_module, exports, __webpack_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.Def = void 0; +var tslib_1 = __webpack_require__(/*! 
tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var Op = Object.prototype; +var objToStr = Op.toString; +var hasOwn = Op.hasOwnProperty; +var BaseType = /** @class */ (function () { + function BaseType() { + } + BaseType.prototype.assert = function (value, deep) { + if (!this.check(value, deep)) { + var str = shallowStringify(value); + throw new Error(str + " does not match type " + this); + } + return true; + }; + BaseType.prototype.arrayOf = function () { + var elemType = this; + return new ArrayType(elemType); + }; + return BaseType; +}()); +var ArrayType = /** @class */ (function (_super) { + tslib_1.__extends(ArrayType, _super); + function ArrayType(elemType) { + var _this = _super.call(this) || this; + _this.elemType = elemType; + _this.kind = "ArrayType"; + return _this; + } + ArrayType.prototype.toString = function () { + return "[" + this.elemType + "]"; + }; + ArrayType.prototype.check = function (value, deep) { + var _this = this; + return Array.isArray(value) && value.every(function (elem) { return _this.elemType.check(elem, deep); }); + }; + return ArrayType; +}(BaseType)); +var IdentityType = /** @class */ (function (_super) { + tslib_1.__extends(IdentityType, _super); + function IdentityType(value) { + var _this = _super.call(this) || this; + _this.value = value; + _this.kind = "IdentityType"; + return _this; + } + IdentityType.prototype.toString = function () { + return String(this.value); + }; + IdentityType.prototype.check = function (value, deep) { + var result = value === this.value; + if (!result && typeof deep === "function") { + deep(this, value); + } + return result; + }; + return IdentityType; +}(BaseType)); +var ObjectType = /** @class */ (function (_super) { + tslib_1.__extends(ObjectType, _super); + function ObjectType(fields) { + var _this = _super.call(this) || this; + _this.fields = fields; + _this.kind = "ObjectType"; + return _this; + } + ObjectType.prototype.toString = function () { + return "{ " + this.fields.join(", ") + " }"; + }; + ObjectType.prototype.check = function (value, deep) { + return (objToStr.call(value) === objToStr.call({}) && + this.fields.every(function (field) { + return field.type.check(value[field.name], deep); + })); + }; + return ObjectType; +}(BaseType)); +var OrType = /** @class */ (function (_super) { + tslib_1.__extends(OrType, _super); + function OrType(types) { + var _this = _super.call(this) || this; + _this.types = types; + _this.kind = "OrType"; + return _this; + } + OrType.prototype.toString = function () { + return this.types.join(" | "); + }; + OrType.prototype.check = function (value, deep) { + return this.types.some(function (type) { + return type.check(value, deep); + }); + }; + return OrType; +}(BaseType)); +var PredicateType = /** @class */ (function (_super) { + tslib_1.__extends(PredicateType, _super); + function PredicateType(name, predicate) { + var _this = _super.call(this) || this; + _this.name = name; + _this.predicate = predicate; + _this.kind = "PredicateType"; + return _this; + } + PredicateType.prototype.toString = function () { + return this.name; + }; + PredicateType.prototype.check = function (value, deep) { + var result = this.predicate(value, deep); + if (!result && typeof deep === "function") { + deep(this, value); + } + return result; + }; + return PredicateType; +}(BaseType)); +var Def = /** @class */ (function () { + function Def(type, typeName) { + this.type = type; + this.typeName = typeName; + this.baseNames = []; + this.ownFields = 
Object.create(null); + // Includes own typeName. Populated during finalization. + this.allSupertypes = Object.create(null); + // Linear inheritance hierarchy. Populated during finalization. + this.supertypeList = []; + // Includes inherited fields. + this.allFields = Object.create(null); + // Non-hidden keys of allFields. + this.fieldNames = []; + // This property will be overridden as true by individual Def instances + // when they are finalized. + this.finalized = false; + // False by default until .build(...) is called on an instance. + this.buildable = false; + this.buildParams = []; + } + Def.prototype.isSupertypeOf = function (that) { + if (that instanceof Def) { + if (this.finalized !== true || + that.finalized !== true) { + throw new Error(""); + } + return hasOwn.call(that.allSupertypes, this.typeName); + } + else { + throw new Error(that + " is not a Def"); + } + }; + Def.prototype.checkAllFields = function (value, deep) { + var allFields = this.allFields; + if (this.finalized !== true) { + throw new Error("" + this.typeName); + } + function checkFieldByName(name) { + var field = allFields[name]; + var type = field.type; + var child = field.getValue(value); + return type.check(child, deep); + } + return value !== null && + typeof value === "object" && + Object.keys(allFields).every(checkFieldByName); + }; + Def.prototype.bases = function () { + var supertypeNames = []; + for (var _i = 0; _i < arguments.length; _i++) { + supertypeNames[_i] = arguments[_i]; + } + var bases = this.baseNames; + if (this.finalized) { + if (supertypeNames.length !== bases.length) { + throw new Error(""); + } + for (var i = 0; i < supertypeNames.length; i++) { + if (supertypeNames[i] !== bases[i]) { + throw new Error(""); + } + } + return this; + } + supertypeNames.forEach(function (baseName) { + // This indexOf lookup may be O(n), but the typical number of base + // names is very small, and indexOf is a native Array method. + if (bases.indexOf(baseName) < 0) { + bases.push(baseName); + } + }); + return this; // For chaining. + }; + return Def; +}()); +exports.Def = Def; +var Field = /** @class */ (function () { + function Field(name, type, defaultFn, hidden) { + this.name = name; + this.type = type; + this.defaultFn = defaultFn; + this.hidden = !!hidden; + } + Field.prototype.toString = function () { + return JSON.stringify(this.name) + ": " + this.type; + }; + Field.prototype.getValue = function (obj) { + var value = obj[this.name]; + if (typeof value !== "undefined") { + return value; + } + if (typeof this.defaultFn === "function") { + value = this.defaultFn.call(obj); + } + return value; + }; + return Field; +}()); +function shallowStringify(value) { + if (Array.isArray(value)) { + return "[" + value.map(shallowStringify).join(", ") + "]"; + } + if (value && typeof value === "object") { + return "{ " + Object.keys(value).map(function (key) { + return key + ": " + value[key]; + }).join(", ") + " }"; + } + return JSON.stringify(value); +} +function typesPlugin(_fork) { + var Type = { + or: function () { + var types = []; + for (var _i = 0; _i < arguments.length; _i++) { + types[_i] = arguments[_i]; + } + return new OrType(types.map(function (type) { return Type.from(type); })); + }, + from: function (value, name) { + if (value instanceof ArrayType || + value instanceof IdentityType || + value instanceof ObjectType || + value instanceof OrType || + value instanceof PredicateType) { + return value; + } + // The Def type is used as a helper for constructing compound + // interface types for AST nodes. 
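Before the `Def` branch below, a small hedged sketch of the combinators already defined, using the exports of the published ast-types package:

const { Type, builtInTypes } = require("ast-types");

const stringOrNumber = Type.or(builtInTypes.string, builtInTypes.number);
console.log(String(stringOrNumber));      // "string | number"
console.log(stringOrNumber.check("abc")); // true
console.log(stringOrNumber.check(/abc/)); // false

const numbers = builtInTypes.number.arrayOf();
console.log(numbers.check([1, 2, 3]));    // true
console.log(numbers.check([1, "2"]));     // false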
+ if (value instanceof Def) { + return value.type; + } + // Support [ElemType] syntax. + if (isArray.check(value)) { + if (value.length !== 1) { + throw new Error("only one element type is permitted for typed arrays"); + } + return new ArrayType(Type.from(value[0])); + } + // Support { someField: FieldType, ... } syntax. + if (isObject.check(value)) { + return new ObjectType(Object.keys(value).map(function (name) { + return new Field(name, Type.from(value[name], name)); + })); + } + if (typeof value === "function") { + var bicfIndex = builtInCtorFns.indexOf(value); + if (bicfIndex >= 0) { + return builtInCtorTypes[bicfIndex]; + } + if (typeof name !== "string") { + throw new Error("missing name"); + } + return new PredicateType(name, value); + } + // As a last resort, toType returns a type that matches any value that + // is === from. This is primarily useful for literal values like + // toType(null), but it has the additional advantage of allowing + // toType to be a total function. + return new IdentityType(value); + }, + // Define a type whose name is registered in a namespace (the defCache) so + // that future definitions will return the same type given the same name. + // In particular, this system allows for circular and forward definitions. + // The Def object d returned from Type.def may be used to configure the + // type d.type by calling methods such as d.bases, d.build, and d.field. + def: function (typeName) { + return hasOwn.call(defCache, typeName) + ? defCache[typeName] + : defCache[typeName] = new DefImpl(typeName); + }, + hasDef: function (typeName) { + return hasOwn.call(defCache, typeName); + } + }; + var builtInCtorFns = []; + var builtInCtorTypes = []; + function defBuiltInType(name, example) { + var objStr = objToStr.call(example); + var type = new PredicateType(name, function (value) { return objToStr.call(value) === objStr; }); + if (example && typeof example.constructor === "function") { + builtInCtorFns.push(example.constructor); + builtInCtorTypes.push(type); + } + return type; + } + // These types check the underlying [[Class]] attribute of the given + // value, rather than using the problematic typeof operator. Note however + // that no subtyping is considered; so, for instance, isObject.check + // returns false for [], /./, new Date, and null. + var isString = defBuiltInType("string", "truthy"); + var isFunction = defBuiltInType("function", function () { }); + var isArray = defBuiltInType("array", []); + var isObject = defBuiltInType("object", {}); + var isRegExp = defBuiltInType("RegExp", /./); + var isDate = defBuiltInType("Date", new Date()); + var isNumber = defBuiltInType("number", 3); + var isBoolean = defBuiltInType("boolean", true); + var isNull = defBuiltInType("null", null); + var isUndefined = defBuiltInType("undefined", undefined); + var builtInTypes = { + string: isString, + function: isFunction, + array: isArray, + object: isObject, + RegExp: isRegExp, + Date: isDate, + number: isNumber, + boolean: isBoolean, + null: isNull, + undefined: isUndefined, + }; + // In order to return the same Def instance every time Type.def is called + // with a particular name, those instances need to be stored in a cache. 
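That cache, declared next as `defCache`, is observable through `Type.def` and `Type.hasDef`; the sketch below assumes the standard definitions have already been loaded by requiring the package's main entry point.

const { Type } = require("ast-types");

// "Identifier" is registered by the core definitions, so the same Def is returned each time.
console.log(Type.def("Identifier") === Type.def("Identifier")); // true
console.log(Type.hasDef("Identifier"));                         // true
console.log(Type.hasDef("NotARealNodeType"));                   // false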
+ var defCache = Object.create(null); + function defFromValue(value) { + if (value && typeof value === "object") { + var type = value.type; + if (typeof type === "string" && + hasOwn.call(defCache, type)) { + var d = defCache[type]; + if (d.finalized) { + return d; + } + } + } + return null; + } + var DefImpl = /** @class */ (function (_super) { + tslib_1.__extends(DefImpl, _super); + function DefImpl(typeName) { + var _this = _super.call(this, new PredicateType(typeName, function (value, deep) { return _this.check(value, deep); }), typeName) || this; + return _this; + } + DefImpl.prototype.check = function (value, deep) { + if (this.finalized !== true) { + throw new Error("prematurely checking unfinalized type " + this.typeName); + } + // A Def type can only match an object value. + if (value === null || typeof value !== "object") { + return false; + } + var vDef = defFromValue(value); + if (!vDef) { + // If we couldn't infer the Def associated with the given value, + // and we expected it to be a SourceLocation or a Position, it was + // probably just missing a "type" field (because Esprima does not + // assign a type property to such nodes). Be optimistic and let + // this.checkAllFields make the final decision. + if (this.typeName === "SourceLocation" || + this.typeName === "Position") { + return this.checkAllFields(value, deep); + } + // Calling this.checkAllFields for any other type of node is both + // bad for performance and way too forgiving. + return false; + } + // If checking deeply and vDef === this, then we only need to call + // checkAllFields once. Calling checkAllFields is too strict when deep + // is false, because then we only care about this.isSupertypeOf(vDef). + if (deep && vDef === this) { + return this.checkAllFields(value, deep); + } + // In most cases we rely exclusively on isSupertypeOf to make O(1) + // subtyping determinations. This suffices in most situations outside + // of unit tests, since interface conformance is checked whenever new + // instances are created using builder functions. + if (!this.isSupertypeOf(vDef)) { + return false; + } + // The exception is when deep is true; then, we recursively check all + // fields. + if (!deep) { + return true; + } + // Use the more specific Def (vDef) to perform the deep check, but + // shallow-check fields defined by the less specific Def (this). + return vDef.checkAllFields(value, deep) + && this.checkAllFields(value, false); + }; + DefImpl.prototype.build = function () { + var _this = this; + var buildParams = []; + for (var _i = 0; _i < arguments.length; _i++) { + buildParams[_i] = arguments[_i]; + } + // Calling Def.prototype.build multiple times has the effect of merely + // redefining this property. + this.buildParams = buildParams; + if (this.buildable) { + // If this Def is already buildable, update self.buildParams and + // continue using the old builder function. + return this; + } + // Every buildable type will have its "type" field filled in + // automatically. This includes types that are not subtypes of Node, + // like SourceLocation, but that seems harmless (TODO?). + this.field("type", String, function () { return _this.typeName; }); + // Override Dp.buildable for this Def instance. 
+ this.buildable = true; + var addParam = function (built, param, arg, isArgAvailable) { + if (hasOwn.call(built, param)) + return; + var all = _this.allFields; + if (!hasOwn.call(all, param)) { + throw new Error("" + param); + } + var field = all[param]; + var type = field.type; + var value; + if (isArgAvailable) { + value = arg; + } + else if (field.defaultFn) { + // Expose the partially-built object to the default + // function as its `this` object. + value = field.defaultFn.call(built); + } + else { + var message = "no value or default function given for field " + + JSON.stringify(param) + " of " + _this.typeName + "(" + + _this.buildParams.map(function (name) { + return all[name]; + }).join(", ") + ")"; + throw new Error(message); + } + if (!type.check(value)) { + throw new Error(shallowStringify(value) + + " does not match field " + field + + " of type " + _this.typeName); + } + built[param] = value; + }; + // Calling the builder function will construct an instance of the Def, + // with positional arguments mapped to the fields original passed to .build. + // If not enough arguments are provided, the default value for the remaining fields + // will be used. + var builder = function () { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + var argc = args.length; + if (!_this.finalized) { + throw new Error("attempting to instantiate unfinalized type " + + _this.typeName); + } + var built = Object.create(nodePrototype); + _this.buildParams.forEach(function (param, i) { + if (i < argc) { + addParam(built, param, args[i], true); + } + else { + addParam(built, param, null, false); + } + }); + Object.keys(_this.allFields).forEach(function (param) { + // Use the default value. + addParam(built, param, null, false); + }); + // Make sure that the "type" field was filled automatically. + if (built.type !== _this.typeName) { + throw new Error(""); + } + return built; + }; + // Calling .from on the builder function will construct an instance of the Def, + // using field values from the passed object. For fields missing from the passed object, + // their default value will be used. + builder.from = function (obj) { + if (!_this.finalized) { + throw new Error("attempting to instantiate unfinalized type " + + _this.typeName); + } + var built = Object.create(nodePrototype); + Object.keys(_this.allFields).forEach(function (param) { + if (hasOwn.call(obj, param)) { + addParam(built, param, obj[param], true); + } + else { + addParam(built, param, null, false); + } + }); + // Make sure that the "type" field was filled automatically. + if (built.type !== _this.typeName) { + throw new Error(""); + } + return built; + }; + Object.defineProperty(builders, getBuilderName(this.typeName), { + enumerable: true, + value: builder + }); + return this; + }; + // The reason fields are specified using .field(...) instead of an object + // literal syntax is somewhat subtle: the object literal syntax would + // support only one key and one value, but with .field(...) we can pass + // any number of arguments to specify the field. + DefImpl.prototype.field = function (name, type, defaultFn, hidden) { + if (this.finalized) { + console.error("Ignoring attempt to redefine field " + + JSON.stringify(name) + " of finalized type " + + JSON.stringify(this.typeName)); + return this; + } + this.ownFields[name] = new Field(name, Type.from(type), defaultFn, hidden); + return this; // For chaining. 
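Before the field and finalize plumbing continues, here is a rough sketch of the resulting builder behaviour via the published `builders` and `namedTypes` exports; the specific node types used are just convenient examples from the core definitions, which lie outside this hunk.

const { builders: b, namedTypes: n } = require("ast-types");

// Positional arguments map to the fields passed to .build(); "type" is filled in automatically.
const decl = b.variableDeclaration("var", [
  b.variableDeclarator(b.identifier("answer"), b.literal(42)),
]);
console.log(decl.type);                         // "VariableDeclaration"
console.log(n.VariableDeclaration.check(decl)); // true

// builder.from() accepts a plain object instead of positional arguments.
const id = b.identifier.from({ name: "answer" });
console.log(id.name);                           // "answer"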
+ }; + DefImpl.prototype.finalize = function () { + var _this = this; + // It's not an error to finalize a type more than once, but only the + // first call to .finalize does anything. + if (!this.finalized) { + var allFields = this.allFields; + var allSupertypes = this.allSupertypes; + this.baseNames.forEach(function (name) { + var def = defCache[name]; + if (def instanceof Def) { + def.finalize(); + extend(allFields, def.allFields); + extend(allSupertypes, def.allSupertypes); + } + else { + var message = "unknown supertype name " + + JSON.stringify(name) + + " for subtype " + + JSON.stringify(_this.typeName); + throw new Error(message); + } + }); + // TODO Warn if fields are overridden with incompatible types. + extend(allFields, this.ownFields); + allSupertypes[this.typeName] = this; + this.fieldNames.length = 0; + for (var fieldName in allFields) { + if (hasOwn.call(allFields, fieldName) && + !allFields[fieldName].hidden) { + this.fieldNames.push(fieldName); + } + } + // Types are exported only once they have been finalized. + Object.defineProperty(namedTypes, this.typeName, { + enumerable: true, + value: this.type + }); + this.finalized = true; + // A linearization of the inheritance hierarchy. + populateSupertypeList(this.typeName, this.supertypeList); + if (this.buildable && + this.supertypeList.lastIndexOf("Expression") >= 0) { + wrapExpressionBuilderWithStatement(this.typeName); + } + } + }; + return DefImpl; + }(Def)); + // Note that the list returned by this function is a copy of the internal + // supertypeList, *without* the typeName itself as the first element. + function getSupertypeNames(typeName) { + if (!hasOwn.call(defCache, typeName)) { + throw new Error(""); + } + var d = defCache[typeName]; + if (d.finalized !== true) { + throw new Error(""); + } + return d.supertypeList.slice(1); + } + // Returns an object mapping from every known type in the defCache to the + // most specific supertype whose name is an own property of the candidates + // object. + function computeSupertypeLookupTable(candidates) { + var table = {}; + var typeNames = Object.keys(defCache); + var typeNameCount = typeNames.length; + for (var i = 0; i < typeNameCount; ++i) { + var typeName = typeNames[i]; + var d = defCache[typeName]; + if (d.finalized !== true) { + throw new Error("" + typeName); + } + for (var j = 0; j < d.supertypeList.length; ++j) { + var superTypeName = d.supertypeList[j]; + if (hasOwn.call(candidates, superTypeName)) { + table[typeName] = superTypeName; + break; + } + } + } + return table; + } + var builders = Object.create(null); + // This object is used as prototype for any node created by a builder. + var nodePrototype = {}; + // Call this function to define a new method to be shared by all AST + // nodes. The replaced method (if any) is returned for easy wrapping. + function defineMethod(name, func) { + var old = nodePrototype[name]; + // Pass undefined as func to delete nodePrototype[name]. + if (isUndefined.check(func)) { + delete nodePrototype[name]; + } + else { + isFunction.assert(func); + Object.defineProperty(nodePrototype, name, { + enumerable: true, + configurable: true, + value: func + }); + } + return old; + } + function getBuilderName(typeName) { + return typeName.replace(/^[A-Z]+/, function (upperCasePrefix) { + var len = upperCasePrefix.length; + switch (len) { + case 0: return ""; + // If there's only one initial capital letter, just lower-case it. 
+ case 1: return upperCasePrefix.toLowerCase(); + default: + // If there's more than one initial capital letter, lower-case + // all but the last one, so that XMLDefaultDeclaration (for + // example) becomes xmlDefaultDeclaration. + return upperCasePrefix.slice(0, len - 1).toLowerCase() + + upperCasePrefix.charAt(len - 1); + } + }); + } + function getStatementBuilderName(typeName) { + typeName = getBuilderName(typeName); + return typeName.replace(/(Expression)?$/, "Statement"); + } + var namedTypes = {}; + // Like Object.keys, but aware of what fields each AST type should have. + function getFieldNames(object) { + var d = defFromValue(object); + if (d) { + return d.fieldNames.slice(0); + } + if ("type" in object) { + throw new Error("did not recognize object of type " + + JSON.stringify(object.type)); + } + return Object.keys(object); + } + // Get the value of an object property, taking object.type and default + // functions into account. + function getFieldValue(object, fieldName) { + var d = defFromValue(object); + if (d) { + var field = d.allFields[fieldName]; + if (field) { + return field.getValue(object); + } + } + return object && object[fieldName]; + } + // Iterate over all defined fields of an object, including those missing + // or undefined, passing each field name and effective value (as returned + // by getFieldValue) to the callback. If the object has no corresponding + // Def, the callback will never be called. + function eachField(object, callback, context) { + getFieldNames(object).forEach(function (name) { + callback.call(this, name, getFieldValue(object, name)); + }, context); + } + // Similar to eachField, except that iteration stops as soon as the + // callback returns a truthy value. Like Array.prototype.some, the final + // result is either true or false to indicates whether the callback + // returned true for any element or not. + function someField(object, callback, context) { + return getFieldNames(object).some(function (name) { + return callback.call(this, name, getFieldValue(object, name)); + }, context); + } + // Adds an additional builder for Expression subtypes + // that wraps the built Expression in an ExpressionStatements. + function wrapExpressionBuilderWithStatement(typeName) { + var wrapperName = getStatementBuilderName(typeName); + // skip if the builder already exists + if (builders[wrapperName]) + return; + // the builder function to wrap with builders.ExpressionStatement + var wrapped = builders[getBuilderName(typeName)]; + // skip if there is nothing to wrap + if (!wrapped) + return; + var builder = function () { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + return builders.expressionStatement(wrapped.apply(builders, args)); + }; + builder.from = function () { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + return builders.expressionStatement(wrapped.from.apply(builders, args)); + }; + builders[wrapperName] = builder; + } + function populateSupertypeList(typeName, list) { + list.length = 0; + list.push(typeName); + var lastSeen = Object.create(null); + for (var pos = 0; pos < list.length; ++pos) { + typeName = list[pos]; + var d = defCache[typeName]; + if (d.finalized !== true) { + throw new Error(""); + } + // If we saw typeName earlier in the breadth-first traversal, + // delete the last-seen occurrence. 
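The naming conventions above can be checked against the published exports; the `identifierStatement` wrapper in this sketch is expected to exist per the `wrapExpressionBuilderWithStatement` logic shown earlier, but treat it as an assumption.

const { getBuilderName, builders: b } = require("ast-types");

console.log(getBuilderName("Identifier"));            // "identifier"
console.log(getBuilderName("XMLDefaultDeclaration")); // "xmlDefaultDeclaration"

// Expression builders get a companion *Statement wrapper builder:
const stmt = b.identifierStatement("x"); // expressionStatement(identifier("x"))
console.log(stmt.type);                  // "ExpressionStatement"
console.log(stmt.expression.name);       // "x"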
+ if (hasOwn.call(lastSeen, typeName)) { + delete list[lastSeen[typeName]]; + } + // Record the new index of the last-seen occurrence of typeName. + lastSeen[typeName] = pos; + // Enqueue the base names of this type. + list.push.apply(list, d.baseNames); + } + // Compaction loop to remove array holes. + for (var to = 0, from = to, len = list.length; from < len; ++from) { + if (hasOwn.call(list, from)) { + list[to++] = list[from]; + } + } + list.length = to; + } + function extend(into, from) { + Object.keys(from).forEach(function (name) { + into[name] = from[name]; + }); + return into; + } + function finalize() { + Object.keys(defCache).forEach(function (name) { + defCache[name].finalize(); + }); + } + return { + Type: Type, + builtInTypes: builtInTypes, + getSupertypeNames: getSupertypeNames, + computeSupertypeLookupTable: computeSupertypeLookupTable, + builders: builders, + defineMethod: defineMethod, + getBuilderName: getBuilderName, + getStatementBuilderName: getStatementBuilderName, + namedTypes: namedTypes, + getFieldNames: getFieldNames, + getFieldValue: getFieldValue, + eachField: eachField, + someField: someField, + finalize: finalize, + }; +} +exports["default"] = typesPlugin; +; + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/main.js": +/*!*******************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/main.js ***! + \*******************************************************************************************************/ +/***/ ((__unused_webpack_module, exports, __webpack_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.visit = exports.use = exports.Type = exports.someField = exports.PathVisitor = exports.Path = exports.NodePath = exports.namedTypes = exports.getSupertypeNames = exports.getFieldValue = exports.getFieldNames = exports.getBuilderName = exports.finalize = exports.eachField = exports.defineMethod = exports.builtInTypes = exports.builders = exports.astNodesAreEquivalent = void 0; +var tslib_1 = __webpack_require__(/*! tslib */ "../../../.yarn/berry/cache/tslib-npm-2.4.0-9cb6dc5030-9.zip/node_modules/tslib/tslib.es6.js"); +var fork_1 = tslib_1.__importDefault(__webpack_require__(/*! ./fork */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/fork.js")); +var core_1 = tslib_1.__importDefault(__webpack_require__(/*! ./def/core */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/core.js")); +var es6_1 = tslib_1.__importDefault(__webpack_require__(/*! ./def/es6 */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es6.js")); +var es7_1 = tslib_1.__importDefault(__webpack_require__(/*! ./def/es7 */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es7.js")); +var es2020_1 = tslib_1.__importDefault(__webpack_require__(/*! ./def/es2020 */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es2020.js")); +var jsx_1 = tslib_1.__importDefault(__webpack_require__(/*! ./def/jsx */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/jsx.js")); +var flow_1 = tslib_1.__importDefault(__webpack_require__(/*! 
./def/flow */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/flow.js")); +var esprima_1 = tslib_1.__importDefault(__webpack_require__(/*! ./def/esprima */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/esprima.js")); +var babel_1 = tslib_1.__importDefault(__webpack_require__(/*! ./def/babel */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/babel.js")); +var typescript_1 = tslib_1.__importDefault(__webpack_require__(/*! ./def/typescript */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/typescript.js")); +var es_proposals_1 = tslib_1.__importDefault(__webpack_require__(/*! ./def/es-proposals */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/def/es-proposals.js")); +var namedTypes_1 = __webpack_require__(/*! ./gen/namedTypes */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/gen/namedTypes.js"); +Object.defineProperty(exports, "namedTypes", ({ enumerable: true, get: function () { return namedTypes_1.namedTypes; } })); +var _a = fork_1.default([ + // This core module of AST types captures ES5 as it is parsed today by + // git://github.com/ariya/esprima.git#master. + core_1.default, + // Feel free to add to or remove from this list of extension modules to + // configure the precise type hierarchy that you need. + es6_1.default, + es7_1.default, + es2020_1.default, + jsx_1.default, + flow_1.default, + esprima_1.default, + babel_1.default, + typescript_1.default, + es_proposals_1.default, +]), astNodesAreEquivalent = _a.astNodesAreEquivalent, builders = _a.builders, builtInTypes = _a.builtInTypes, defineMethod = _a.defineMethod, eachField = _a.eachField, finalize = _a.finalize, getBuilderName = _a.getBuilderName, getFieldNames = _a.getFieldNames, getFieldValue = _a.getFieldValue, getSupertypeNames = _a.getSupertypeNames, n = _a.namedTypes, NodePath = _a.NodePath, Path = _a.Path, PathVisitor = _a.PathVisitor, someField = _a.someField, Type = _a.Type, use = _a.use, visit = _a.visit; +exports.astNodesAreEquivalent = astNodesAreEquivalent; +exports.builders = builders; +exports.builtInTypes = builtInTypes; +exports.defineMethod = defineMethod; +exports.eachField = eachField; +exports.finalize = finalize; +exports.getBuilderName = getBuilderName; +exports.getFieldNames = getFieldNames; +exports.getFieldValue = getFieldValue; +exports.getSupertypeNames = getSupertypeNames; +exports.NodePath = NodePath; +exports.Path = Path; +exports.PathVisitor = PathVisitor; +exports.someField = someField; +exports.Type = Type; +exports.use = use; +exports.visit = visit; +// Populate the exported fields of the namedTypes namespace, while still +// retaining its member types. +Object.assign(namedTypes_1.namedTypes, n); + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/bytes-npm-3.1.2-28b8643004-9.zip/node_modules/bytes/index.js": +/*!***********************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/bytes-npm-3.1.2-28b8643004-9.zip/node_modules/bytes/index.js ***! + \***********************************************************************************************/ +/***/ ((module) => { + +"use strict"; +/*! + * bytes + * Copyright(c) 2012-2014 TJ Holowaychuk + * Copyright(c) 2015 Jed Watson + * MIT Licensed + */ + + + +/** + * Module exports. 
+ * @public + */ + +module.exports = bytes; +module.exports.format = format; +module.exports.parse = parse; + +/** + * Module variables. + * @private + */ + +var formatThousandsRegExp = /\B(?=(\d{3})+(?!\d))/g; + +var formatDecimalsRegExp = /(?:\.0*|(\.[^0]+)0+)$/; + +var map = { + b: 1, + kb: 1 << 10, + mb: 1 << 20, + gb: 1 << 30, + tb: Math.pow(1024, 4), + pb: Math.pow(1024, 5), +}; + +var parseRegExp = /^((-|\+)?(\d+(?:\.\d+)?)) *(kb|mb|gb|tb|pb)$/i; + +/** + * Convert the given value in bytes into a string or parse to string to an integer in bytes. + * + * @param {string|number} value + * @param {{ + * case: [string], + * decimalPlaces: [number] + * fixedDecimals: [boolean] + * thousandsSeparator: [string] + * unitSeparator: [string] + * }} [options] bytes options. + * + * @returns {string|number|null} + */ + +function bytes(value, options) { + if (typeof value === 'string') { + return parse(value); + } + + if (typeof value === 'number') { + return format(value, options); + } + + return null; +} + +/** + * Format the given value in bytes into a string. + * + * If the value is negative, it is kept as such. If it is a float, + * it is rounded. + * + * @param {number} value + * @param {object} [options] + * @param {number} [options.decimalPlaces=2] + * @param {number} [options.fixedDecimals=false] + * @param {string} [options.thousandsSeparator=] + * @param {string} [options.unit=] + * @param {string} [options.unitSeparator=] + * + * @returns {string|null} + * @public + */ + +function format(value, options) { + if (!Number.isFinite(value)) { + return null; + } + + var mag = Math.abs(value); + var thousandsSeparator = (options && options.thousandsSeparator) || ''; + var unitSeparator = (options && options.unitSeparator) || ''; + var decimalPlaces = (options && options.decimalPlaces !== undefined) ? options.decimalPlaces : 2; + var fixedDecimals = Boolean(options && options.fixedDecimals); + var unit = (options && options.unit) || ''; + + if (!unit || !map[unit.toLowerCase()]) { + if (mag >= map.pb) { + unit = 'PB'; + } else if (mag >= map.tb) { + unit = 'TB'; + } else if (mag >= map.gb) { + unit = 'GB'; + } else if (mag >= map.mb) { + unit = 'MB'; + } else if (mag >= map.kb) { + unit = 'KB'; + } else { + unit = 'B'; + } + } + + var val = value / map[unit.toLowerCase()]; + var str = val.toFixed(decimalPlaces); + + if (!fixedDecimals) { + str = str.replace(formatDecimalsRegExp, '$1'); + } + + if (thousandsSeparator) { + str = str.split('.').map(function (s, i) { + return i === 0 + ? s.replace(formatThousandsRegExp, thousandsSeparator) + : s + }).join('.'); + } + + return str + unitSeparator + unit; +} + +/** + * Parse the string value into an integer in bytes. + * + * If no unit is given, it is assumed the value is in bytes. 
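Typical usage of this module, for reference; the results follow from the `map` table and the formatting options above.

const bytes = require("bytes");

console.log(bytes(1024));                              // "1KB"
console.log(bytes(1536, { decimalPlaces: 1 }));        // "1.5KB"
console.log(bytes(1000, { thousandsSeparator: "," })); // "1,000B"
console.log(bytes.format(5242880, { unit: "KB" }));    // "5120KB"
console.log(bytes.parse("1KB"));                       // 1024
console.log(bytes.parse("10.5mb"));                    // 11010048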
+ * + * @param {number|string} val + * + * @returns {number|null} + * @public + */ + +function parse(val) { + if (typeof val === 'number' && !isNaN(val)) { + return val; + } + + if (typeof val !== 'string') { + return null; + } + + // Test if the string passed is valid + var results = parseRegExp.exec(val); + var floatValue; + var unit = 'b'; + + if (!results) { + // Nothing could be extracted from the given string + floatValue = parseInt(val, 10); + unit = 'b' + } else { + // Retrieve the value and the unit + floatValue = parseFloat(results[1]); + unit = results[4].toLowerCase(); + } + + if (isNaN(floatValue)) { + return null; + } + + return Math.floor(map[unit] * floatValue); +} + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/core-util-is-npm-1.0.3-ca74b76c90-9.zip/node_modules/core-util-is/lib/util.js": +/*!****************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/core-util-is-npm-1.0.3-ca74b76c90-9.zip/node_modules/core-util-is/lib/util.js ***! + \****************************************************************************************************************/ +/***/ ((__unused_webpack_module, exports, __webpack_require__) => { + +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// NOTE: These type checking functions intentionally don't use `instanceof` +// because it is fragile and can be easily faked with `Object.create()`. 
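A short sketch of why the `[[Class]]`-based checks below are preferred over `instanceof`, using the same package:

const util = require("core-util-is");

const fake = Object.create(RegExp.prototype); // not actually a RegExp
console.log(fake instanceof RegExp);          // true: instanceof is fooled by the prototype chain
console.log(util.isRegExp(fake));             // false: Object.prototype.toString is not
console.log(util.isRegExp(/abc/));            // true
console.log(util.isNullOrUndefined(null));    // true (== null also catches undefined)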
+ +function isArray(arg) { + if (Array.isArray) { + return Array.isArray(arg); + } + return objectToString(arg) === '[object Array]'; +} +exports.isArray = isArray; + +function isBoolean(arg) { + return typeof arg === 'boolean'; +} +exports.isBoolean = isBoolean; + +function isNull(arg) { + return arg === null; +} +exports.isNull = isNull; + +function isNullOrUndefined(arg) { + return arg == null; +} +exports.isNullOrUndefined = isNullOrUndefined; + +function isNumber(arg) { + return typeof arg === 'number'; +} +exports.isNumber = isNumber; + +function isString(arg) { + return typeof arg === 'string'; +} +exports.isString = isString; + +function isSymbol(arg) { + return typeof arg === 'symbol'; +} +exports.isSymbol = isSymbol; + +function isUndefined(arg) { + return arg === void 0; +} +exports.isUndefined = isUndefined; + +function isRegExp(re) { + return objectToString(re) === '[object RegExp]'; +} +exports.isRegExp = isRegExp; + +function isObject(arg) { + return typeof arg === 'object' && arg !== null; +} +exports.isObject = isObject; + +function isDate(d) { + return objectToString(d) === '[object Date]'; +} +exports.isDate = isDate; + +function isError(e) { + return (objectToString(e) === '[object Error]' || e instanceof Error); +} +exports.isError = isError; + +function isFunction(arg) { + return typeof arg === 'function'; +} +exports.isFunction = isFunction; + +function isPrimitive(arg) { + return arg === null || + typeof arg === 'boolean' || + typeof arg === 'number' || + typeof arg === 'string' || + typeof arg === 'symbol' || // ES6 symbol + typeof arg === 'undefined'; +} +exports.isPrimitive = isPrimitive; + +exports.isBuffer = __webpack_require__(/*! buffer */ "buffer").Buffer.isBuffer; + +function objectToString(o) { + return Object.prototype.toString.call(o); +} + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/data-uri-to-buffer-npm-3.0.1-830646f9ee-9.zip/node_modules/data-uri-to-buffer/dist/src/index.js": +/*!**********************************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/data-uri-to-buffer-npm-3.0.1-830646f9ee-9.zip/node_modules/data-uri-to-buffer/dist/src/index.js ***! + \**********************************************************************************************************************************/ +/***/ ((module) => { + +"use strict"; + +/** + * Returns a `Buffer` instance from the given data URI `uri`. 
+ * + * @param {String} uri Data URI to turn into a Buffer instance + * @return {Buffer} Buffer instance from Data URI + * @api public + */ +function dataUriToBuffer(uri) { + if (!/^data:/i.test(uri)) { + throw new TypeError('`uri` does not appear to be a Data URI (must begin with "data:")'); + } + // strip newlines + uri = uri.replace(/\r?\n/g, ''); + // split the URI up into the "metadata" and the "data" portions + const firstComma = uri.indexOf(','); + if (firstComma === -1 || firstComma <= 4) { + throw new TypeError('malformed data: URI'); + } + // remove the "data:" scheme and parse the metadata + const meta = uri.substring(5, firstComma).split(';'); + let charset = ''; + let base64 = false; + const type = meta[0] || 'text/plain'; + let typeFull = type; + for (let i = 1; i < meta.length; i++) { + if (meta[i] === 'base64') { + base64 = true; + } + else { + typeFull += `;${meta[i]}`; + if (meta[i].indexOf('charset=') === 0) { + charset = meta[i].substring(8); + } + } + } + // defaults to US-ASCII only if type is not provided + if (!meta[0] && !charset.length) { + typeFull += ';charset=US-ASCII'; + charset = 'US-ASCII'; + } + // get the encoded data portion and decode URI-encoded chars + const encoding = base64 ? 'base64' : 'ascii'; + const data = unescape(uri.substring(firstComma + 1)); + const buffer = Buffer.from(data, encoding); + // set `.type` and `.typeFull` properties to MIME type + buffer.type = type; + buffer.typeFull = typeFull; + // set the `.charset` property + buffer.charset = charset; + return buffer; +} +module.exports = dataUriToBuffer; +//# sourceMappingURL=index.js.map + +/***/ }), + +/***/ "../../../.yarn/berry/cache/degenerator-npm-3.0.2-3b38df9d12-9.zip/node_modules/degenerator/dist/src/index.js": +/*!********************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/degenerator-npm-3.0.2-3b38df9d12-9.zip/node_modules/degenerator/dist/src/index.js ***! + \********************************************************************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; + +const util_1 = __webpack_require__(/*! util */ "util"); +const escodegen_1 = __webpack_require__(/*! escodegen */ "../../../.yarn/berry/cache/escodegen-npm-1.14.3-a4dedc6eeb-9.zip/node_modules/escodegen/escodegen.js"); +const esprima_1 = __webpack_require__(/*! esprima */ "../../../.yarn/berry/cache/esprima-npm-4.0.1-1084e98778-9.zip/node_modules/esprima/dist/esprima.js"); +const ast_types_1 = __webpack_require__(/*! ast-types */ "../../../.yarn/berry/cache/ast-types-npm-0.13.4-69f7e68df8-9.zip/node_modules/ast-types/main.js"); +const vm2_1 = __webpack_require__(/*! vm2 */ "../../../.yarn/berry/cache/vm2-patch-b52c0bba95-9.zip/node_modules/vm2/index.js"); +/** + * Compiles sync JavaScript code into JavaScript with async Functions. 
+ * + * @param {String} code JavaScript string to convert + * @param {Array} names Array of function names to add `await` operators to + * @return {String} Converted JavaScript string with async/await injected + * @api public + */ +function degenerator(code, _names) { + if (!Array.isArray(_names)) { + throw new TypeError('an array of async function "names" is required'); + } + // Duplicate the `names` array since it's rude to augment the user args + const names = _names.slice(0); + const ast = esprima_1.parseScript(code); + // First pass is to find the `function` nodes and turn them into async or + // generator functions only if their body includes `CallExpressions` to + // function in `names`. We also add the names of the functions to the `names` + // array. We'll iterate several time, as every iteration might add new items + // to the `names` array, until no new names were added in the iteration. + let lastNamesLength = 0; + do { + lastNamesLength = names.length; + ast_types_1.visit(ast, { + visitVariableDeclaration(path) { + if (path.node.declarations) { + for (let i = 0; i < path.node.declarations.length; i++) { + const declaration = path.node.declarations[i]; + if (ast_types_1.namedTypes.VariableDeclarator.check(declaration) && + ast_types_1.namedTypes.Identifier.check(declaration.init) && + ast_types_1.namedTypes.Identifier.check(declaration.id) && + checkName(declaration.init.name, names) && + !checkName(declaration.id.name, names)) { + names.push(declaration.id.name); + } + } + } + return false; + }, + visitAssignmentExpression(path) { + if (ast_types_1.namedTypes.Identifier.check(path.node.left) && + ast_types_1.namedTypes.Identifier.check(path.node.right) && + checkName(path.node.right.name, names) && + !checkName(path.node.left.name, names)) { + names.push(path.node.left.name); + } + return false; + }, + visitFunction(path) { + if (path.node.id) { + let shouldDegenerate = false; + ast_types_1.visit(path.node, { + visitCallExpression(path) { + if (checkNames(path.node, names)) { + shouldDegenerate = true; + } + return false; + }, + }); + if (!shouldDegenerate) { + return false; + } + // Got a "function" expression/statement, + // convert it into an async function + path.node.async = true; + // Add function name to `names` array + if (!checkName(path.node.id.name, names)) { + names.push(path.node.id.name); + } + } + this.traverse(path); + }, + }); + } while (lastNamesLength !== names.length); + // Second pass is for adding `await`/`yield` statements to any function + // invocations that match the given `names` array. 
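Before the second pass shown next, an end-to-end sketch of what this function produces; the exact output formatting is an assumption (it comes from escodegen's defaults), but the `async`/`await` injection follows from the two passes described here.

const degenerator = require("degenerator");

const src = "function lookup(host) { return dnsResolve(host); }";
console.log(degenerator(src, ["dnsResolve"]));
// Expected shape of the result (formatting may differ slightly):
//   async function lookup(host) {
//       return await dnsResolve(host);
//   }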
+ ast_types_1.visit(ast, { + visitCallExpression(path) { + if (checkNames(path.node, names)) { + // A "function invocation" expression, + // we need to inject a `AwaitExpression`/`YieldExpression` + const delegate = false; + const { name, parent: { node: pNode }, } = path; + const expr = ast_types_1.builders.awaitExpression(path.node, delegate); + if (ast_types_1.namedTypes.CallExpression.check(pNode)) { + pNode.arguments[name] = expr; + } + else { + pNode[name] = expr; + } + } + this.traverse(path); + }, + }); + return escodegen_1.generate(ast); +} +(function (degenerator) { + function compile(code, returnName, names, options = {}) { + const compiled = degenerator(code, names); + const vm = new vm2_1.VM(options); + const script = new vm2_1.VMScript(`${compiled};${returnName}`, { + filename: options.filename, + }); + const fn = vm.run(script); + if (typeof fn !== 'function') { + throw new Error(`Expected a "function" to be returned for \`${returnName}\`, but got "${typeof fn}"`); + } + const r = function (...args) { + try { + const p = fn.apply(this, args); + if (typeof (p === null || p === void 0 ? void 0 : p.then) === 'function') { + return p; + } + return Promise.resolve(p); + } + catch (err) { + return Promise.reject(err); + } + }; + Object.defineProperty(r, 'toString', { + value: fn.toString.bind(fn), + enumerable: false, + }); + return r; + } + degenerator.compile = compile; +})(degenerator || (degenerator = {})); +/** + * Returns `true` if `node` has a matching name to one of the entries in the + * `names` array. + * + * @param {types.Node} node + * @param {Array} names Array of function names to return true for + * @return {Boolean} + * @api private + */ +function checkNames({ callee }, names) { + let name; + if (ast_types_1.namedTypes.Identifier.check(callee)) { + name = callee.name; + } + else if (ast_types_1.namedTypes.MemberExpression.check(callee)) { + if (ast_types_1.namedTypes.Identifier.check(callee.object) && + ast_types_1.namedTypes.Identifier.check(callee.property)) { + name = `${callee.object.name}.${callee.property.name}`; + } + else { + return false; + } + } + else if (ast_types_1.namedTypes.FunctionExpression.check(callee)) { + if (callee.id) { + name = callee.id.name; + } + else { + return false; + } + } + else { + throw new Error(`Don't know how to get name for: ${callee.type}`); + } + return checkName(name, names); +} +function checkName(name, names) { + // now that we have the `name`, check if any entries match in the `names` array + for (let i = 0; i < names.length; i++) { + const n = names[i]; + if (util_1.isRegExp(n)) { + if (n.test(name)) { + return true; + } + } + else if (name === n) { + return true; + } + } + return false; +} +module.exports = degenerator; +//# sourceMappingURL=index.js.map + +/***/ }), + +/***/ "../../../.yarn/berry/cache/depd-npm-2.0.0-b6c51a4b43-9.zip/node_modules/depd/index.js": +/*!*********************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/depd-npm-2.0.0-b6c51a4b43-9.zip/node_modules/depd/index.js ***! + \*********************************************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +/*! + * depd + * Copyright(c) 2014-2018 Douglas Christopher Wilson + * MIT Licensed + */ + +/** + * Module dependencies. + */ + +var relative = (__webpack_require__(/*! path */ "path").relative) + +/** + * Module exports. + */ + +module.exports = depd + +/** + * Get the path to base files on. 
+ */ + +var basePath = process.cwd() + +/** + * Determine if namespace is contained in the string. + */ + +function containsNamespace (str, namespace) { + var vals = str.split(/[ ,]+/) + var ns = String(namespace).toLowerCase() + + for (var i = 0; i < vals.length; i++) { + var val = vals[i] + + // namespace contained + if (val && (val === '*' || val.toLowerCase() === ns)) { + return true + } + } + + return false +} + +/** + * Convert a data descriptor to accessor descriptor. + */ + +function convertDataDescriptorToAccessor (obj, prop, message) { + var descriptor = Object.getOwnPropertyDescriptor(obj, prop) + var value = descriptor.value + + descriptor.get = function getter () { return value } + + if (descriptor.writable) { + descriptor.set = function setter (val) { return (value = val) } + } + + delete descriptor.value + delete descriptor.writable + + Object.defineProperty(obj, prop, descriptor) + + return descriptor +} + +/** + * Create arguments string to keep arity. + */ + +function createArgumentsString (arity) { + var str = '' + + for (var i = 0; i < arity; i++) { + str += ', arg' + i + } + + return str.substr(2) +} + +/** + * Create stack string from stack. + */ + +function createStackString (stack) { + var str = this.name + ': ' + this.namespace + + if (this.message) { + str += ' deprecated ' + this.message + } + + for (var i = 0; i < stack.length; i++) { + str += '\n at ' + stack[i].toString() + } + + return str +} + +/** + * Create deprecate for namespace in caller. + */ + +function depd (namespace) { + if (!namespace) { + throw new TypeError('argument namespace is required') + } + + var stack = getStack() + var site = callSiteLocation(stack[1]) + var file = site[0] + + function deprecate (message) { + // call to self as log + log.call(deprecate, message) + } + + deprecate._file = file + deprecate._ignored = isignored(namespace) + deprecate._namespace = namespace + deprecate._traced = istraced(namespace) + deprecate._warned = Object.create(null) + + deprecate.function = wrapfunction + deprecate.property = wrapproperty + + return deprecate +} + +/** + * Determine if event emitter has listeners of a given type. + * + * The way to do this check is done three different ways in Node.js >= 0.8 + * so this consolidates them into a minimal set using instance methods. + * + * @param {EventEmitter} emitter + * @param {string} type + * @returns {boolean} + * @private + */ + +function eehaslisteners (emitter, type) { + var count = typeof emitter.listenerCount !== 'function' + ? emitter.listeners(type).length + : emitter.listenerCount(type) + + return count > 0 +} + +/** + * Determine if namespace is ignored. + */ + +function isignored (namespace) { + if (process.noDeprecation) { + // --no-deprecation support + return true + } + + var str = process.env.NO_DEPRECATION || '' + + // namespace ignored + return containsNamespace(str, namespace) +} + +/** + * Determine if namespace is traced. + */ + +function istraced (namespace) { + if (process.traceDeprecation) { + // --trace-deprecation support + return true + } + + var str = process.env.TRACE_DEPRECATION || '' + + // namespace traced + return containsNamespace(str, namespace) +} + +/** + * Display deprecation message. 
+ */ + +function log (message, site) { + var haslisteners = eehaslisteners(process, 'deprecation') + + // abort early if no destination + if (!haslisteners && this._ignored) { + return + } + + var caller + var callFile + var callSite + var depSite + var i = 0 + var seen = false + var stack = getStack() + var file = this._file + + if (site) { + // provided site + depSite = site + callSite = callSiteLocation(stack[1]) + callSite.name = depSite.name + file = callSite[0] + } else { + // get call site + i = 2 + depSite = callSiteLocation(stack[i]) + callSite = depSite + } + + // get caller of deprecated thing in relation to file + for (; i < stack.length; i++) { + caller = callSiteLocation(stack[i]) + callFile = caller[0] + + if (callFile === file) { + seen = true + } else if (callFile === this._file) { + file = this._file + } else if (seen) { + break + } + } + + var key = caller + ? depSite.join(':') + '__' + caller.join(':') + : undefined + + if (key !== undefined && key in this._warned) { + // already warned + return + } + + this._warned[key] = true + + // generate automatic message from call site + var msg = message + if (!msg) { + msg = callSite === depSite || !callSite.name + ? defaultMessage(depSite) + : defaultMessage(callSite) + } + + // emit deprecation if listeners exist + if (haslisteners) { + var err = DeprecationError(this._namespace, msg, stack.slice(i)) + process.emit('deprecation', err) + return + } + + // format and write message + var format = process.stderr.isTTY + ? formatColor + : formatPlain + var output = format.call(this, msg, caller, stack.slice(i)) + process.stderr.write(output + '\n', 'utf8') +} + +/** + * Get call site location as array. + */ + +function callSiteLocation (callSite) { + var file = callSite.getFileName() || '' + var line = callSite.getLineNumber() + var colm = callSite.getColumnNumber() + + if (callSite.isEval()) { + file = callSite.getEvalOrigin() + ', ' + file + } + + var site = [file, line, colm] + + site.callSite = callSite + site.name = callSite.getFunctionName() + + return site +} + +/** + * Generate a default message from the site. + */ + +function defaultMessage (site) { + var callSite = site.callSite + var funcName = site.name + + // make useful anonymous name + if (!funcName) { + funcName = '' + } + + var context = callSite.getThis() + var typeName = context && callSite.getTypeName() + + // ignore useless type name + if (typeName === 'Object') { + typeName = undefined + } + + // make useful type name + if (typeName === 'Function') { + typeName = context.name || typeName + } + + return typeName && callSite.getMethodName() + ? typeName + '.' + funcName + : funcName +} + +/** + * Format deprecation message without color. + */ + +function formatPlain (msg, caller, stack) { + var timestamp = new Date().toUTCString() + + var formatted = timestamp + + ' ' + this._namespace + + ' deprecated ' + msg + + // add stack trace + if (this._traced) { + for (var i = 0; i < stack.length; i++) { + formatted += '\n at ' + stack[i].toString() + } + + return formatted + } + + if (caller) { + formatted += ' at ' + formatLocation(caller) + } + + return formatted +} + +/** + * Format deprecation message with color. 
+ */ + +function formatColor (msg, caller, stack) { + var formatted = '\x1b[36;1m' + this._namespace + '\x1b[22;39m' + // bold cyan + ' \x1b[33;1mdeprecated\x1b[22;39m' + // bold yellow + ' \x1b[0m' + msg + '\x1b[39m' // reset + + // add stack trace + if (this._traced) { + for (var i = 0; i < stack.length; i++) { + formatted += '\n \x1b[36mat ' + stack[i].toString() + '\x1b[39m' // cyan + } + + return formatted + } + + if (caller) { + formatted += ' \x1b[36m' + formatLocation(caller) + '\x1b[39m' // cyan + } + + return formatted +} + +/** + * Format call site location. + */ + +function formatLocation (callSite) { + return relative(basePath, callSite[0]) + + ':' + callSite[1] + + ':' + callSite[2] +} + +/** + * Get the stack as array of call sites. + */ + +function getStack () { + var limit = Error.stackTraceLimit + var obj = {} + var prep = Error.prepareStackTrace + + Error.prepareStackTrace = prepareObjectStackTrace + Error.stackTraceLimit = Math.max(10, limit) + + // capture the stack + Error.captureStackTrace(obj) + + // slice this function off the top + var stack = obj.stack.slice(1) + + Error.prepareStackTrace = prep + Error.stackTraceLimit = limit + + return stack +} + +/** + * Capture call site stack from v8. + */ + +function prepareObjectStackTrace (obj, stack) { + return stack +} + +/** + * Return a wrapped function in a deprecation message. + */ + +function wrapfunction (fn, message) { + if (typeof fn !== 'function') { + throw new TypeError('argument fn must be a function') + } + + var args = createArgumentsString(fn.length) + var stack = getStack() + var site = callSiteLocation(stack[1]) + + site.name = fn.name + + // eslint-disable-next-line no-new-func + var deprecatedfn = new Function('fn', 'log', 'deprecate', 'message', 'site', + '"use strict"\n' + + 'return function (' + args + ') {' + + 'log.call(deprecate, message, site)\n' + + 'return fn.apply(this, arguments)\n' + + '}')(fn, log, this, message, site) + + return deprecatedfn +} + +/** + * Wrap property in a deprecation message. 
+ */ + +function wrapproperty (obj, prop, message) { + if (!obj || (typeof obj !== 'object' && typeof obj !== 'function')) { + throw new TypeError('argument obj must be object') + } + + var descriptor = Object.getOwnPropertyDescriptor(obj, prop) + + if (!descriptor) { + throw new TypeError('must call property on owner object') + } + + if (!descriptor.configurable) { + throw new TypeError('property must be configurable') + } + + var deprecate = this + var stack = getStack() + var site = callSiteLocation(stack[1]) + + // set site name + site.name = prop + + // convert data descriptor + if ('value' in descriptor) { + descriptor = convertDataDescriptorToAccessor(obj, prop, message) + } + + var get = descriptor.get + var set = descriptor.set + + // wrap getter + if (typeof get === 'function') { + descriptor.get = function getter () { + log.call(deprecate, message, site) + return get.apply(this, arguments) + } + } + + // wrap setter + if (typeof set === 'function') { + descriptor.set = function setter () { + log.call(deprecate, message, site) + return set.apply(this, arguments) + } + } + + Object.defineProperty(obj, prop, descriptor) +} + +/** + * Create DeprecationError for deprecation + */ + +function DeprecationError (namespace, message, stack) { + var error = new Error() + var stackString + + Object.defineProperty(error, 'constructor', { + value: DeprecationError + }) + + Object.defineProperty(error, 'message', { + configurable: true, + enumerable: false, + value: message, + writable: true + }) + + Object.defineProperty(error, 'name', { + enumerable: false, + configurable: true, + value: 'DeprecationError', + writable: true + }) + + Object.defineProperty(error, 'namespace', { + configurable: true, + enumerable: false, + value: namespace, + writable: true + }) + + Object.defineProperty(error, 'stack', { + configurable: true, + enumerable: false, + get: function () { + if (stackString !== undefined) { + return stackString + } + + // prepare stack trace + return (stackString = createStackString.call(this, stack)) + }, + set: function setter (val) { + stackString = val + } + }) + + return error +} + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/escodegen-npm-1.14.3-a4dedc6eeb-9.zip/node_modules/escodegen/escodegen.js": +/*!************************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/escodegen-npm-1.14.3-a4dedc6eeb-9.zip/node_modules/escodegen/escodegen.js ***! + \************************************************************************************************************/ +/***/ ((__unused_webpack_module, exports, __webpack_require__) => { + +/* + Copyright (C) 2012-2014 Yusuke Suzuki + Copyright (C) 2015 Ingvar Stepanyan + Copyright (C) 2014 Ivan Nikulin + Copyright (C) 2012-2013 Michael Ficarra + Copyright (C) 2012-2013 Mathias Bynens + Copyright (C) 2013 Irakli Gozalishvili + Copyright (C) 2012 Robert Gust-Bardon + Copyright (C) 2012 John Freeman + Copyright (C) 2011-2012 Ariya Hidayat + Copyright (C) 2012 Joost-Wim Boekesteijn + Copyright (C) 2012 Kris Kowal + Copyright (C) 2012 Arpad Borsos + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/*global exports:true, require:true, global:true*/ +(function () { + 'use strict'; + + var Syntax, + Precedence, + BinaryPrecedence, + SourceNode, + estraverse, + esutils, + base, + indent, + json, + renumber, + hexadecimal, + quotes, + escapeless, + newline, + space, + parentheses, + semicolons, + safeConcatenation, + directive, + extra, + parse, + sourceMap, + sourceCode, + preserveBlankLines, + FORMAT_MINIFY, + FORMAT_DEFAULTS; + + estraverse = __webpack_require__(/*! estraverse */ "../../../.yarn/berry/cache/estraverse-npm-4.3.0-920a32f3c6-9.zip/node_modules/estraverse/estraverse.js"); + esutils = __webpack_require__(/*! esutils */ "../../../.yarn/berry/cache/esutils-npm-2.0.3-f865beafd5-9.zip/node_modules/esutils/lib/utils.js"); + + Syntax = estraverse.Syntax; + + // Generation is done by generateExpression. + function isExpression(node) { + return CodeGenerator.Expression.hasOwnProperty(node.type); + } + + // Generation is done by generateStatement. 
+ function isStatement(node) { + return CodeGenerator.Statement.hasOwnProperty(node.type); + } + + Precedence = { + Sequence: 0, + Yield: 1, + Assignment: 1, + Conditional: 2, + ArrowFunction: 2, + LogicalOR: 3, + LogicalAND: 4, + BitwiseOR: 5, + BitwiseXOR: 6, + BitwiseAND: 7, + Equality: 8, + Relational: 9, + BitwiseSHIFT: 10, + Additive: 11, + Multiplicative: 12, + Exponentiation: 13, + Await: 14, + Unary: 14, + Postfix: 15, + Call: 16, + New: 17, + TaggedTemplate: 18, + Member: 19, + Primary: 20 + }; + + BinaryPrecedence = { + '||': Precedence.LogicalOR, + '&&': Precedence.LogicalAND, + '|': Precedence.BitwiseOR, + '^': Precedence.BitwiseXOR, + '&': Precedence.BitwiseAND, + '==': Precedence.Equality, + '!=': Precedence.Equality, + '===': Precedence.Equality, + '!==': Precedence.Equality, + 'is': Precedence.Equality, + 'isnt': Precedence.Equality, + '<': Precedence.Relational, + '>': Precedence.Relational, + '<=': Precedence.Relational, + '>=': Precedence.Relational, + 'in': Precedence.Relational, + 'instanceof': Precedence.Relational, + '<<': Precedence.BitwiseSHIFT, + '>>': Precedence.BitwiseSHIFT, + '>>>': Precedence.BitwiseSHIFT, + '+': Precedence.Additive, + '-': Precedence.Additive, + '*': Precedence.Multiplicative, + '%': Precedence.Multiplicative, + '/': Precedence.Multiplicative, + '**': Precedence.Exponentiation + }; + + //Flags + var F_ALLOW_IN = 1, + F_ALLOW_CALL = 1 << 1, + F_ALLOW_UNPARATH_NEW = 1 << 2, + F_FUNC_BODY = 1 << 3, + F_DIRECTIVE_CTX = 1 << 4, + F_SEMICOLON_OPT = 1 << 5; + + //Expression flag sets + //NOTE: Flag order: + // F_ALLOW_IN + // F_ALLOW_CALL + // F_ALLOW_UNPARATH_NEW + var E_FTT = F_ALLOW_CALL | F_ALLOW_UNPARATH_NEW, + E_TTF = F_ALLOW_IN | F_ALLOW_CALL, + E_TTT = F_ALLOW_IN | F_ALLOW_CALL | F_ALLOW_UNPARATH_NEW, + E_TFF = F_ALLOW_IN, + E_FFT = F_ALLOW_UNPARATH_NEW, + E_TFT = F_ALLOW_IN | F_ALLOW_UNPARATH_NEW; + + //Statement flag sets + //NOTE: Flag order: + // F_ALLOW_IN + // F_FUNC_BODY + // F_DIRECTIVE_CTX + // F_SEMICOLON_OPT + var S_TFFF = F_ALLOW_IN, + S_TFFT = F_ALLOW_IN | F_SEMICOLON_OPT, + S_FFFF = 0x00, + S_TFTF = F_ALLOW_IN | F_DIRECTIVE_CTX, + S_TTFF = F_ALLOW_IN | F_FUNC_BODY; + + function getDefaultOptions() { + // default options + return { + indent: null, + base: null, + parse: null, + comment: false, + format: { + indent: { + style: ' ', + base: 0, + adjustMultilineComment: false + }, + newline: '\n', + space: ' ', + json: false, + renumber: false, + hexadecimal: false, + quotes: 'single', + escapeless: false, + compact: false, + parentheses: true, + semicolons: true, + safeConcatenation: false, + preserveBlankLines: false + }, + moz: { + comprehensionExpressionStartsWithAssignment: false, + starlessGenerator: false + }, + sourceMap: null, + sourceMapRoot: null, + sourceMapWithCode: false, + directive: false, + raw: true, + verbatim: null, + sourceCode: null + }; + } + + function stringRepeat(str, num) { + var result = ''; + + for (num |= 0; num > 0; num >>>= 1, str += str) { + if (num & 1) { + result += str; + } + } + + return result; + } + + function hasLineTerminator(str) { + return (/[\r\n]/g).test(str); + } + + function endsWithLineTerminator(str) { + var len = str.length; + return len && esutils.code.isLineTerminator(str.charCodeAt(len - 1)); + } + + function merge(target, override) { + var key; + for (key in override) { + if (override.hasOwnProperty(key)) { + target[key] = override[key]; + } + } + return target; + } + + function updateDeeply(target, override) { + var key, val; + + function isHashObject(target) { + return typeof 
target === 'object' && target instanceof Object && !(target instanceof RegExp); + } + + for (key in override) { + if (override.hasOwnProperty(key)) { + val = override[key]; + if (isHashObject(val)) { + if (isHashObject(target[key])) { + updateDeeply(target[key], val); + } else { + target[key] = updateDeeply({}, val); + } + } else { + target[key] = val; + } + } + } + return target; + } + + function generateNumber(value) { + var result, point, temp, exponent, pos; + + if (value !== value) { + throw new Error('Numeric literal whose value is NaN'); + } + if (value < 0 || (value === 0 && 1 / value < 0)) { + throw new Error('Numeric literal whose value is negative'); + } + + if (value === 1 / 0) { + return json ? 'null' : renumber ? '1e400' : '1e+400'; + } + + result = '' + value; + if (!renumber || result.length < 3) { + return result; + } + + point = result.indexOf('.'); + if (!json && result.charCodeAt(0) === 0x30 /* 0 */ && point === 1) { + point = 0; + result = result.slice(1); + } + temp = result; + result = result.replace('e+', 'e'); + exponent = 0; + if ((pos = temp.indexOf('e')) > 0) { + exponent = +temp.slice(pos + 1); + temp = temp.slice(0, pos); + } + if (point >= 0) { + exponent -= temp.length - point - 1; + temp = +(temp.slice(0, point) + temp.slice(point + 1)) + ''; + } + pos = 0; + while (temp.charCodeAt(temp.length + pos - 1) === 0x30 /* 0 */) { + --pos; + } + if (pos !== 0) { + exponent -= pos; + temp = temp.slice(0, pos); + } + if (exponent !== 0) { + temp += 'e' + exponent; + } + if ((temp.length < result.length || + (hexadecimal && value > 1e12 && Math.floor(value) === value && (temp = '0x' + value.toString(16)).length < result.length)) && + +temp === value) { + result = temp; + } + + return result; + } + + // Generate valid RegExp expression. + // This function is based on https://github.com/Constellation/iv Engine + + function escapeRegExpCharacter(ch, previousIsBackslash) { + // not handling '\' and handling \u2028 or \u2029 to unicode escape sequence + if ((ch & ~1) === 0x2028) { + return (previousIsBackslash ? 'u' : '\\u') + ((ch === 0x2028) ? '2028' : '2029'); + } else if (ch === 10 || ch === 13) { // \n, \r + return (previousIsBackslash ? '' : '\\') + ((ch === 10) ? 
'n' : 'r'); + } + return String.fromCharCode(ch); + } + + function generateRegExp(reg) { + var match, result, flags, i, iz, ch, characterInBrack, previousIsBackslash; + + result = reg.toString(); + + if (reg.source) { + // extract flag from toString result + match = result.match(/\/([^/]*)$/); + if (!match) { + return result; + } + + flags = match[1]; + result = ''; + + characterInBrack = false; + previousIsBackslash = false; + for (i = 0, iz = reg.source.length; i < iz; ++i) { + ch = reg.source.charCodeAt(i); + + if (!previousIsBackslash) { + if (characterInBrack) { + if (ch === 93) { // ] + characterInBrack = false; + } + } else { + if (ch === 47) { // / + result += '\\'; + } else if (ch === 91) { // [ + characterInBrack = true; + } + } + result += escapeRegExpCharacter(ch, previousIsBackslash); + previousIsBackslash = ch === 92; // \ + } else { + // if new RegExp("\\\n') is provided, create /\n/ + result += escapeRegExpCharacter(ch, previousIsBackslash); + // prevent like /\\[/]/ + previousIsBackslash = false; + } + } + + return '/' + result + '/' + flags; + } + + return result; + } + + function escapeAllowedCharacter(code, next) { + var hex; + + if (code === 0x08 /* \b */) { + return '\\b'; + } + + if (code === 0x0C /* \f */) { + return '\\f'; + } + + if (code === 0x09 /* \t */) { + return '\\t'; + } + + hex = code.toString(16).toUpperCase(); + if (json || code > 0xFF) { + return '\\u' + '0000'.slice(hex.length) + hex; + } else if (code === 0x0000 && !esutils.code.isDecimalDigit(next)) { + return '\\0'; + } else if (code === 0x000B /* \v */) { // '\v' + return '\\x0B'; + } else { + return '\\x' + '00'.slice(hex.length) + hex; + } + } + + function escapeDisallowedCharacter(code) { + if (code === 0x5C /* \ */) { + return '\\\\'; + } + + if (code === 0x0A /* \n */) { + return '\\n'; + } + + if (code === 0x0D /* \r */) { + return '\\r'; + } + + if (code === 0x2028) { + return '\\u2028'; + } + + if (code === 0x2029) { + return '\\u2029'; + } + + throw new Error('Incorrectly classified character'); + } + + function escapeDirective(str) { + var i, iz, code, quote; + + quote = quotes === 'double' ? '"' : '\''; + for (i = 0, iz = str.length; i < iz; ++i) { + code = str.charCodeAt(i); + if (code === 0x27 /* ' */) { + quote = '"'; + break; + } else if (code === 0x22 /* " */) { + quote = '\''; + break; + } else if (code === 0x5C /* \ */) { + ++i; + } + } + + return quote + str + quote; + } + + function escapeString(str) { + var result = '', i, len, code, singleQuotes = 0, doubleQuotes = 0, single, quote; + + for (i = 0, len = str.length; i < len; ++i) { + code = str.charCodeAt(i); + if (code === 0x27 /* ' */) { + ++singleQuotes; + } else if (code === 0x22 /* " */) { + ++doubleQuotes; + } else if (code === 0x2F /* / */ && json) { + result += '\\'; + } else if (esutils.code.isLineTerminator(code) || code === 0x5C /* \ */) { + result += escapeDisallowedCharacter(code); + continue; + } else if (!esutils.code.isIdentifierPartES5(code) && (json && code < 0x20 /* SP */ || !json && !escapeless && (code < 0x20 /* SP */ || code > 0x7E /* ~ */))) { + result += escapeAllowedCharacter(code, str.charCodeAt(i + 1)); + continue; + } + result += String.fromCharCode(code); + } + + single = !(quotes === 'double' || (quotes === 'auto' && doubleQuotes < singleQuotes)); + quote = single ? '\'' : '"'; + + if (!(single ? 
singleQuotes : doubleQuotes)) { + return quote + result + quote; + } + + str = result; + result = quote; + + for (i = 0, len = str.length; i < len; ++i) { + code = str.charCodeAt(i); + if ((code === 0x27 /* ' */ && single) || (code === 0x22 /* " */ && !single)) { + result += '\\'; + } + result += String.fromCharCode(code); + } + + return result + quote; + } + + /** + * flatten an array to a string, where the array can contain + * either strings or nested arrays + */ + function flattenToString(arr) { + var i, iz, elem, result = ''; + for (i = 0, iz = arr.length; i < iz; ++i) { + elem = arr[i]; + result += Array.isArray(elem) ? flattenToString(elem) : elem; + } + return result; + } + + /** + * convert generated to a SourceNode when source maps are enabled. + */ + function toSourceNodeWhenNeeded(generated, node) { + if (!sourceMap) { + // with no source maps, generated is either an + // array or a string. if an array, flatten it. + // if a string, just return it + if (Array.isArray(generated)) { + return flattenToString(generated); + } else { + return generated; + } + } + if (node == null) { + if (generated instanceof SourceNode) { + return generated; + } else { + node = {}; + } + } + if (node.loc == null) { + return new SourceNode(null, null, sourceMap, generated, node.name || null); + } + return new SourceNode(node.loc.start.line, node.loc.start.column, (sourceMap === true ? node.loc.source || null : sourceMap), generated, node.name || null); + } + + function noEmptySpace() { + return (space) ? space : ' '; + } + + function join(left, right) { + var leftSource, + rightSource, + leftCharCode, + rightCharCode; + + leftSource = toSourceNodeWhenNeeded(left).toString(); + if (leftSource.length === 0) { + return [right]; + } + + rightSource = toSourceNodeWhenNeeded(right).toString(); + if (rightSource.length === 0) { + return [left]; + } + + leftCharCode = leftSource.charCodeAt(leftSource.length - 1); + rightCharCode = rightSource.charCodeAt(0); + + if ((leftCharCode === 0x2B /* + */ || leftCharCode === 0x2D /* - */) && leftCharCode === rightCharCode || + esutils.code.isIdentifierPartES5(leftCharCode) && esutils.code.isIdentifierPartES5(rightCharCode) || + leftCharCode === 0x2F /* / */ && rightCharCode === 0x69 /* i */) { // infix word operators all start with `i` + return [left, noEmptySpace(), right]; + } else if (esutils.code.isWhiteSpace(leftCharCode) || esutils.code.isLineTerminator(leftCharCode) || + esutils.code.isWhiteSpace(rightCharCode) || esutils.code.isLineTerminator(rightCharCode)) { + return [left, right]; + } + return [left, space, right]; + } + + function addIndent(stmt) { + return [base, stmt]; + } + + function withIndent(fn) { + var previousBase; + previousBase = base; + base += indent; + fn(base); + base = previousBase; + } + + function calculateSpaces(str) { + var i; + for (i = str.length - 1; i >= 0; --i) { + if (esutils.code.isLineTerminator(str.charCodeAt(i))) { + break; + } + } + return (str.length - 1) - i; + } + + function adjustMultilineComment(value, specialBase) { + var array, i, len, line, j, spaces, previousBase, sn; + + array = value.split(/\r\n|[\r\n]/); + spaces = Number.MAX_VALUE; + + // first line doesn't have indentation + for (i = 1, len = array.length; i < len; ++i) { + line = array[i]; + j = 0; + while (j < line.length && esutils.code.isWhiteSpace(line.charCodeAt(j))) { + ++j; + } + if (spaces > j) { + spaces = j; + } + } + + if (typeof specialBase !== 'undefined') { + // pattern like + // { + // var t = 20; /* + // * this is comment + // */ + // } + 
previousBase = base; + if (array[1][spaces] === '*') { + specialBase += ' '; + } + base = specialBase; + } else { + if (spaces & 1) { + // /* + // * + // */ + // If spaces are odd number, above pattern is considered. + // We waste 1 space. + --spaces; + } + previousBase = base; + } + + for (i = 1, len = array.length; i < len; ++i) { + sn = toSourceNodeWhenNeeded(addIndent(array[i].slice(spaces))); + array[i] = sourceMap ? sn.join('') : sn; + } + + base = previousBase; + + return array.join('\n'); + } + + function generateComment(comment, specialBase) { + if (comment.type === 'Line') { + if (endsWithLineTerminator(comment.value)) { + return '//' + comment.value; + } else { + // Always use LineTerminator + var result = '//' + comment.value; + if (!preserveBlankLines) { + result += '\n'; + } + return result; + } + } + if (extra.format.indent.adjustMultilineComment && /[\n\r]/.test(comment.value)) { + return adjustMultilineComment('/*' + comment.value + '*/', specialBase); + } + return '/*' + comment.value + '*/'; + } + + function addComments(stmt, result) { + var i, len, comment, save, tailingToStatement, specialBase, fragment, + extRange, range, prevRange, prefix, infix, suffix, count; + + if (stmt.leadingComments && stmt.leadingComments.length > 0) { + save = result; + + if (preserveBlankLines) { + comment = stmt.leadingComments[0]; + result = []; + + extRange = comment.extendedRange; + range = comment.range; + + prefix = sourceCode.substring(extRange[0], range[0]); + count = (prefix.match(/\n/g) || []).length; + if (count > 0) { + result.push(stringRepeat('\n', count)); + result.push(addIndent(generateComment(comment))); + } else { + result.push(prefix); + result.push(generateComment(comment)); + } + + prevRange = range; + + for (i = 1, len = stmt.leadingComments.length; i < len; i++) { + comment = stmt.leadingComments[i]; + range = comment.range; + + infix = sourceCode.substring(prevRange[1], range[0]); + count = (infix.match(/\n/g) || []).length; + result.push(stringRepeat('\n', count)); + result.push(addIndent(generateComment(comment))); + + prevRange = range; + } + + suffix = sourceCode.substring(range[1], extRange[1]); + count = (suffix.match(/\n/g) || []).length; + result.push(stringRepeat('\n', count)); + } else { + comment = stmt.leadingComments[0]; + result = []; + if (safeConcatenation && stmt.type === Syntax.Program && stmt.body.length === 0) { + result.push('\n'); + } + result.push(generateComment(comment)); + if (!endsWithLineTerminator(toSourceNodeWhenNeeded(result).toString())) { + result.push('\n'); + } + + for (i = 1, len = stmt.leadingComments.length; i < len; ++i) { + comment = stmt.leadingComments[i]; + fragment = [generateComment(comment)]; + if (!endsWithLineTerminator(toSourceNodeWhenNeeded(fragment).toString())) { + fragment.push('\n'); + } + result.push(addIndent(fragment)); + } + } + + result.push(addIndent(save)); + } + + if (stmt.trailingComments) { + + if (preserveBlankLines) { + comment = stmt.trailingComments[0]; + extRange = comment.extendedRange; + range = comment.range; + + prefix = sourceCode.substring(extRange[0], range[0]); + count = (prefix.match(/\n/g) || []).length; + + if (count > 0) { + result.push(stringRepeat('\n', count)); + result.push(addIndent(generateComment(comment))); + } else { + result.push(prefix); + result.push(generateComment(comment)); + } + } else { + tailingToStatement = !endsWithLineTerminator(toSourceNodeWhenNeeded(result).toString()); + specialBase = stringRepeat(' ', calculateSpaces(toSourceNodeWhenNeeded([base, result, 
indent]).toString())); + for (i = 0, len = stmt.trailingComments.length; i < len; ++i) { + comment = stmt.trailingComments[i]; + if (tailingToStatement) { + // We assume target like following script + // + // var t = 20; /** + // * This is comment of t + // */ + if (i === 0) { + // first case + result = [result, indent]; + } else { + result = [result, specialBase]; + } + result.push(generateComment(comment, specialBase)); + } else { + result = [result, addIndent(generateComment(comment))]; + } + if (i !== len - 1 && !endsWithLineTerminator(toSourceNodeWhenNeeded(result).toString())) { + result = [result, '\n']; + } + } + } + } + + return result; + } + + function generateBlankLines(start, end, result) { + var j, newlineCount = 0; + + for (j = start; j < end; j++) { + if (sourceCode[j] === '\n') { + newlineCount++; + } + } + + for (j = 1; j < newlineCount; j++) { + result.push(newline); + } + } + + function parenthesize(text, current, should) { + if (current < should) { + return ['(', text, ')']; + } + return text; + } + + function generateVerbatimString(string) { + var i, iz, result; + result = string.split(/\r\n|\n/); + for (i = 1, iz = result.length; i < iz; i++) { + result[i] = newline + base + result[i]; + } + return result; + } + + function generateVerbatim(expr, precedence) { + var verbatim, result, prec; + verbatim = expr[extra.verbatim]; + + if (typeof verbatim === 'string') { + result = parenthesize(generateVerbatimString(verbatim), Precedence.Sequence, precedence); + } else { + // verbatim is object + result = generateVerbatimString(verbatim.content); + prec = (verbatim.precedence != null) ? verbatim.precedence : Precedence.Sequence; + result = parenthesize(result, prec, precedence); + } + + return toSourceNodeWhenNeeded(result, expr); + } + + function CodeGenerator() { + } + + // Helpers. + + CodeGenerator.prototype.maybeBlock = function(stmt, flags) { + var result, noLeadingComment, that = this; + + noLeadingComment = !extra.comment || !stmt.leadingComments; + + if (stmt.type === Syntax.BlockStatement && noLeadingComment) { + return [space, this.generateStatement(stmt, flags)]; + } + + if (stmt.type === Syntax.EmptyStatement && noLeadingComment) { + return ';'; + } + + withIndent(function () { + result = [ + newline, + addIndent(that.generateStatement(stmt, flags)) + ]; + }); + + return result; + }; + + CodeGenerator.prototype.maybeBlockSuffix = function (stmt, result) { + var ends = endsWithLineTerminator(toSourceNodeWhenNeeded(result).toString()); + if (stmt.type === Syntax.BlockStatement && (!extra.comment || !stmt.leadingComments) && !ends) { + return [result, space]; + } + if (ends) { + return [result, base]; + } + return [result, newline, base]; + }; + + function generateIdentifier(node) { + return toSourceNodeWhenNeeded(node.name, node); + } + + function generateAsyncPrefix(node, spaceRequired) { + return node.async ? 'async' + (spaceRequired ? noEmptySpace() : space) : ''; + } + + function generateStarSuffix(node) { + var isGenerator = node.generator && !extra.moz.starlessGenerator; + return isGenerator ? '*' + space : ''; + } + + function generateMethodPrefix(prop) { + var func = prop.value, prefix = ''; + if (func.async) { + prefix += generateAsyncPrefix(func, !prop.computed); + } + if (func.generator) { + // avoid space before method name + prefix += generateStarSuffix(func) ? 
'*' : ''; + } + return prefix; + } + + CodeGenerator.prototype.generatePattern = function (node, precedence, flags) { + if (node.type === Syntax.Identifier) { + return generateIdentifier(node); + } + return this.generateExpression(node, precedence, flags); + }; + + CodeGenerator.prototype.generateFunctionParams = function (node) { + var i, iz, result, hasDefault; + + hasDefault = false; + + if (node.type === Syntax.ArrowFunctionExpression && + !node.rest && (!node.defaults || node.defaults.length === 0) && + node.params.length === 1 && node.params[0].type === Syntax.Identifier) { + // arg => { } case + result = [generateAsyncPrefix(node, true), generateIdentifier(node.params[0])]; + } else { + result = node.type === Syntax.ArrowFunctionExpression ? [generateAsyncPrefix(node, false)] : []; + result.push('('); + if (node.defaults) { + hasDefault = true; + } + for (i = 0, iz = node.params.length; i < iz; ++i) { + if (hasDefault && node.defaults[i]) { + // Handle default values. + result.push(this.generateAssignment(node.params[i], node.defaults[i], '=', Precedence.Assignment, E_TTT)); + } else { + result.push(this.generatePattern(node.params[i], Precedence.Assignment, E_TTT)); + } + if (i + 1 < iz) { + result.push(',' + space); + } + } + + if (node.rest) { + if (node.params.length) { + result.push(',' + space); + } + result.push('...'); + result.push(generateIdentifier(node.rest)); + } + + result.push(')'); + } + + return result; + }; + + CodeGenerator.prototype.generateFunctionBody = function (node) { + var result, expr; + + result = this.generateFunctionParams(node); + + if (node.type === Syntax.ArrowFunctionExpression) { + result.push(space); + result.push('=>'); + } + + if (node.expression) { + result.push(space); + expr = this.generateExpression(node.body, Precedence.Assignment, E_TTT); + if (expr.toString().charAt(0) === '{') { + expr = ['(', expr, ')']; + } + result.push(expr); + } else { + result.push(this.maybeBlock(node.body, S_TTFF)); + } + + return result; + }; + + CodeGenerator.prototype.generateIterationForStatement = function (operator, stmt, flags) { + var result = ['for' + (stmt.await ? 
noEmptySpace() + 'await' : '') + space + '('], that = this; + withIndent(function () { + if (stmt.left.type === Syntax.VariableDeclaration) { + withIndent(function () { + result.push(stmt.left.kind + noEmptySpace()); + result.push(that.generateStatement(stmt.left.declarations[0], S_FFFF)); + }); + } else { + result.push(that.generateExpression(stmt.left, Precedence.Call, E_TTT)); + } + + result = join(result, operator); + result = [join( + result, + that.generateExpression(stmt.right, Precedence.Assignment, E_TTT) + ), ')']; + }); + result.push(this.maybeBlock(stmt.body, flags)); + return result; + }; + + CodeGenerator.prototype.generatePropertyKey = function (expr, computed) { + var result = []; + + if (computed) { + result.push('['); + } + + result.push(this.generateExpression(expr, Precedence.Assignment, E_TTT)); + + if (computed) { + result.push(']'); + } + + return result; + }; + + CodeGenerator.prototype.generateAssignment = function (left, right, operator, precedence, flags) { + if (Precedence.Assignment < precedence) { + flags |= F_ALLOW_IN; + } + + return parenthesize( + [ + this.generateExpression(left, Precedence.Call, flags), + space + operator + space, + this.generateExpression(right, Precedence.Assignment, flags) + ], + Precedence.Assignment, + precedence + ); + }; + + CodeGenerator.prototype.semicolon = function (flags) { + if (!semicolons && flags & F_SEMICOLON_OPT) { + return ''; + } + return ';'; + }; + + // Statements. + + CodeGenerator.Statement = { + + BlockStatement: function (stmt, flags) { + var range, content, result = ['{', newline], that = this; + + withIndent(function () { + // handle functions without any code + if (stmt.body.length === 0 && preserveBlankLines) { + range = stmt.range; + if (range[1] - range[0] > 2) { + content = sourceCode.substring(range[0] + 1, range[1] - 1); + if (content[0] === '\n') { + result = ['{']; + } + result.push(content); + } + } + + var i, iz, fragment, bodyFlags; + bodyFlags = S_TFFF; + if (flags & F_FUNC_BODY) { + bodyFlags |= F_DIRECTIVE_CTX; + } + + for (i = 0, iz = stmt.body.length; i < iz; ++i) { + if (preserveBlankLines) { + // handle spaces before the first line + if (i === 0) { + if (stmt.body[0].leadingComments) { + range = stmt.body[0].leadingComments[0].extendedRange; + content = sourceCode.substring(range[0], range[1]); + if (content[0] === '\n') { + result = ['{']; + } + } + if (!stmt.body[0].leadingComments) { + generateBlankLines(stmt.range[0], stmt.body[0].range[0], result); + } + } + + // handle spaces between lines + if (i > 0) { + if (!stmt.body[i - 1].trailingComments && !stmt.body[i].leadingComments) { + generateBlankLines(stmt.body[i - 1].range[1], stmt.body[i].range[0], result); + } + } + } + + if (i === iz - 1) { + bodyFlags |= F_SEMICOLON_OPT; + } + + if (stmt.body[i].leadingComments && preserveBlankLines) { + fragment = that.generateStatement(stmt.body[i], bodyFlags); + } else { + fragment = addIndent(that.generateStatement(stmt.body[i], bodyFlags)); + } + + result.push(fragment); + if (!endsWithLineTerminator(toSourceNodeWhenNeeded(fragment).toString())) { + if (preserveBlankLines && i < iz - 1) { + // don't add a new line if there are leading coments + // in the next statement + if (!stmt.body[i + 1].leadingComments) { + result.push(newline); + } + } else { + result.push(newline); + } + } + + if (preserveBlankLines) { + // handle spaces after the last line + if (i === iz - 1) { + if (!stmt.body[i].trailingComments) { + generateBlankLines(stmt.body[i].range[1], stmt.range[1], result); + } + } + } + } + 
}); + + result.push(addIndent('}')); + return result; + }, + + BreakStatement: function (stmt, flags) { + if (stmt.label) { + return 'break ' + stmt.label.name + this.semicolon(flags); + } + return 'break' + this.semicolon(flags); + }, + + ContinueStatement: function (stmt, flags) { + if (stmt.label) { + return 'continue ' + stmt.label.name + this.semicolon(flags); + } + return 'continue' + this.semicolon(flags); + }, + + ClassBody: function (stmt, flags) { + var result = [ '{', newline], that = this; + + withIndent(function (indent) { + var i, iz; + + for (i = 0, iz = stmt.body.length; i < iz; ++i) { + result.push(indent); + result.push(that.generateExpression(stmt.body[i], Precedence.Sequence, E_TTT)); + if (i + 1 < iz) { + result.push(newline); + } + } + }); + + if (!endsWithLineTerminator(toSourceNodeWhenNeeded(result).toString())) { + result.push(newline); + } + result.push(base); + result.push('}'); + return result; + }, + + ClassDeclaration: function (stmt, flags) { + var result, fragment; + result = ['class']; + if (stmt.id) { + result = join(result, this.generateExpression(stmt.id, Precedence.Sequence, E_TTT)); + } + if (stmt.superClass) { + fragment = join('extends', this.generateExpression(stmt.superClass, Precedence.Unary, E_TTT)); + result = join(result, fragment); + } + result.push(space); + result.push(this.generateStatement(stmt.body, S_TFFT)); + return result; + }, + + DirectiveStatement: function (stmt, flags) { + if (extra.raw && stmt.raw) { + return stmt.raw + this.semicolon(flags); + } + return escapeDirective(stmt.directive) + this.semicolon(flags); + }, + + DoWhileStatement: function (stmt, flags) { + // Because `do 42 while (cond)` is Syntax Error. We need semicolon. + var result = join('do', this.maybeBlock(stmt.body, S_TFFF)); + result = this.maybeBlockSuffix(stmt.body, result); + return join(result, [ + 'while' + space + '(', + this.generateExpression(stmt.test, Precedence.Sequence, E_TTT), + ')' + this.semicolon(flags) + ]); + }, + + CatchClause: function (stmt, flags) { + var result, that = this; + withIndent(function () { + var guard; + + if (stmt.param) { + result = [ + 'catch' + space + '(', + that.generateExpression(stmt.param, Precedence.Sequence, E_TTT), + ')' + ]; + + if (stmt.guard) { + guard = that.generateExpression(stmt.guard, Precedence.Sequence, E_TTT); + result.splice(2, 0, ' if ', guard); + } + } else { + result = ['catch']; + } + }); + result.push(this.maybeBlock(stmt.body, S_TFFF)); + return result; + }, + + DebuggerStatement: function (stmt, flags) { + return 'debugger' + this.semicolon(flags); + }, + + EmptyStatement: function (stmt, flags) { + return ';'; + }, + + ExportDefaultDeclaration: function (stmt, flags) { + var result = [ 'export' ], bodyFlags; + + bodyFlags = (flags & F_SEMICOLON_OPT) ? S_TFFT : S_TFFF; + + // export default HoistableDeclaration[Default] + // export default AssignmentExpression[In] ; + result = join(result, 'default'); + if (isStatement(stmt.declaration)) { + result = join(result, this.generateStatement(stmt.declaration, bodyFlags)); + } else { + result = join(result, this.generateExpression(stmt.declaration, Precedence.Assignment, E_TTT) + this.semicolon(flags)); + } + return result; + }, + + ExportNamedDeclaration: function (stmt, flags) { + var result = [ 'export' ], bodyFlags, that = this; + + bodyFlags = (flags & F_SEMICOLON_OPT) ? 
S_TFFT : S_TFFF; + + // export VariableStatement + // export Declaration[Default] + if (stmt.declaration) { + return join(result, this.generateStatement(stmt.declaration, bodyFlags)); + } + + // export ExportClause[NoReference] FromClause ; + // export ExportClause ; + if (stmt.specifiers) { + if (stmt.specifiers.length === 0) { + result = join(result, '{' + space + '}'); + } else if (stmt.specifiers[0].type === Syntax.ExportBatchSpecifier) { + result = join(result, this.generateExpression(stmt.specifiers[0], Precedence.Sequence, E_TTT)); + } else { + result = join(result, '{'); + withIndent(function (indent) { + var i, iz; + result.push(newline); + for (i = 0, iz = stmt.specifiers.length; i < iz; ++i) { + result.push(indent); + result.push(that.generateExpression(stmt.specifiers[i], Precedence.Sequence, E_TTT)); + if (i + 1 < iz) { + result.push(',' + newline); + } + } + }); + if (!endsWithLineTerminator(toSourceNodeWhenNeeded(result).toString())) { + result.push(newline); + } + result.push(base + '}'); + } + + if (stmt.source) { + result = join(result, [ + 'from' + space, + // ModuleSpecifier + this.generateExpression(stmt.source, Precedence.Sequence, E_TTT), + this.semicolon(flags) + ]); + } else { + result.push(this.semicolon(flags)); + } + } + return result; + }, + + ExportAllDeclaration: function (stmt, flags) { + // export * FromClause ; + return [ + 'export' + space, + '*' + space, + 'from' + space, + // ModuleSpecifier + this.generateExpression(stmt.source, Precedence.Sequence, E_TTT), + this.semicolon(flags) + ]; + }, + + ExpressionStatement: function (stmt, flags) { + var result, fragment; + + function isClassPrefixed(fragment) { + var code; + if (fragment.slice(0, 5) !== 'class') { + return false; + } + code = fragment.charCodeAt(5); + return code === 0x7B /* '{' */ || esutils.code.isWhiteSpace(code) || esutils.code.isLineTerminator(code); + } + + function isFunctionPrefixed(fragment) { + var code; + if (fragment.slice(0, 8) !== 'function') { + return false; + } + code = fragment.charCodeAt(8); + return code === 0x28 /* '(' */ || esutils.code.isWhiteSpace(code) || code === 0x2A /* '*' */ || esutils.code.isLineTerminator(code); + } + + function isAsyncPrefixed(fragment) { + var code, i, iz; + if (fragment.slice(0, 5) !== 'async') { + return false; + } + if (!esutils.code.isWhiteSpace(fragment.charCodeAt(5))) { + return false; + } + for (i = 6, iz = fragment.length; i < iz; ++i) { + if (!esutils.code.isWhiteSpace(fragment.charCodeAt(i))) { + break; + } + } + if (i === iz) { + return false; + } + if (fragment.slice(i, i + 8) !== 'function') { + return false; + } + code = fragment.charCodeAt(i + 8); + return code === 0x28 /* '(' */ || esutils.code.isWhiteSpace(code) || code === 0x2A /* '*' */ || esutils.code.isLineTerminator(code); + } + + result = [this.generateExpression(stmt.expression, Precedence.Sequence, E_TTT)]; + // 12.4 '{', 'function', 'class' is not allowed in this position. 
+ // wrap expression with parentheses + fragment = toSourceNodeWhenNeeded(result).toString(); + if (fragment.charCodeAt(0) === 0x7B /* '{' */ || // ObjectExpression + isClassPrefixed(fragment) || + isFunctionPrefixed(fragment) || + isAsyncPrefixed(fragment) || + (directive && (flags & F_DIRECTIVE_CTX) && stmt.expression.type === Syntax.Literal && typeof stmt.expression.value === 'string')) { + result = ['(', result, ')' + this.semicolon(flags)]; + } else { + result.push(this.semicolon(flags)); + } + return result; + }, + + ImportDeclaration: function (stmt, flags) { + // ES6: 15.2.1 valid import declarations: + // - import ImportClause FromClause ; + // - import ModuleSpecifier ; + var result, cursor, that = this; + + // If no ImportClause is present, + // this should be `import ModuleSpecifier` so skip `from` + // ModuleSpecifier is StringLiteral. + if (stmt.specifiers.length === 0) { + // import ModuleSpecifier ; + return [ + 'import', + space, + // ModuleSpecifier + this.generateExpression(stmt.source, Precedence.Sequence, E_TTT), + this.semicolon(flags) + ]; + } + + // import ImportClause FromClause ; + result = [ + 'import' + ]; + cursor = 0; + + // ImportedBinding + if (stmt.specifiers[cursor].type === Syntax.ImportDefaultSpecifier) { + result = join(result, [ + this.generateExpression(stmt.specifiers[cursor], Precedence.Sequence, E_TTT) + ]); + ++cursor; + } + + if (stmt.specifiers[cursor]) { + if (cursor !== 0) { + result.push(','); + } + + if (stmt.specifiers[cursor].type === Syntax.ImportNamespaceSpecifier) { + // NameSpaceImport + result = join(result, [ + space, + this.generateExpression(stmt.specifiers[cursor], Precedence.Sequence, E_TTT) + ]); + } else { + // NamedImports + result.push(space + '{'); + + if ((stmt.specifiers.length - cursor) === 1) { + // import { ... } from "..."; + result.push(space); + result.push(this.generateExpression(stmt.specifiers[cursor], Precedence.Sequence, E_TTT)); + result.push(space + '}' + space); + } else { + // import { + // ..., + // ..., + // } from "..."; + withIndent(function (indent) { + var i, iz; + result.push(newline); + for (i = cursor, iz = stmt.specifiers.length; i < iz; ++i) { + result.push(indent); + result.push(that.generateExpression(stmt.specifiers[i], Precedence.Sequence, E_TTT)); + if (i + 1 < iz) { + result.push(',' + newline); + } + } + }); + if (!endsWithLineTerminator(toSourceNodeWhenNeeded(result).toString())) { + result.push(newline); + } + result.push(base + '}' + space); + } + } + } + + result = join(result, [ + 'from' + space, + // ModuleSpecifier + this.generateExpression(stmt.source, Precedence.Sequence, E_TTT), + this.semicolon(flags) + ]); + return result; + }, + + VariableDeclarator: function (stmt, flags) { + var itemFlags = (flags & F_ALLOW_IN) ? E_TTT : E_FTT; + if (stmt.init) { + return [ + this.generateExpression(stmt.id, Precedence.Assignment, itemFlags), + space, + '=', + space, + this.generateExpression(stmt.init, Precedence.Assignment, itemFlags) + ]; + } + return this.generatePattern(stmt.id, Precedence.Assignment, itemFlags); + }, + + VariableDeclaration: function (stmt, flags) { + // VariableDeclarator is typed as Statement, + // but joined with comma (not LineTerminator). + // So if comment is attached to target node, we should specialize. + var result, i, iz, node, bodyFlags, that = this; + + result = [ stmt.kind ]; + + bodyFlags = (flags & F_ALLOW_IN) ? 
S_TFFF : S_FFFF; + + function block() { + node = stmt.declarations[0]; + if (extra.comment && node.leadingComments) { + result.push('\n'); + result.push(addIndent(that.generateStatement(node, bodyFlags))); + } else { + result.push(noEmptySpace()); + result.push(that.generateStatement(node, bodyFlags)); + } + + for (i = 1, iz = stmt.declarations.length; i < iz; ++i) { + node = stmt.declarations[i]; + if (extra.comment && node.leadingComments) { + result.push(',' + newline); + result.push(addIndent(that.generateStatement(node, bodyFlags))); + } else { + result.push(',' + space); + result.push(that.generateStatement(node, bodyFlags)); + } + } + } + + if (stmt.declarations.length > 1) { + withIndent(block); + } else { + block(); + } + + result.push(this.semicolon(flags)); + + return result; + }, + + ThrowStatement: function (stmt, flags) { + return [join( + 'throw', + this.generateExpression(stmt.argument, Precedence.Sequence, E_TTT) + ), this.semicolon(flags)]; + }, + + TryStatement: function (stmt, flags) { + var result, i, iz, guardedHandlers; + + result = ['try', this.maybeBlock(stmt.block, S_TFFF)]; + result = this.maybeBlockSuffix(stmt.block, result); + + if (stmt.handlers) { + // old interface + for (i = 0, iz = stmt.handlers.length; i < iz; ++i) { + result = join(result, this.generateStatement(stmt.handlers[i], S_TFFF)); + if (stmt.finalizer || i + 1 !== iz) { + result = this.maybeBlockSuffix(stmt.handlers[i].body, result); + } + } + } else { + guardedHandlers = stmt.guardedHandlers || []; + + for (i = 0, iz = guardedHandlers.length; i < iz; ++i) { + result = join(result, this.generateStatement(guardedHandlers[i], S_TFFF)); + if (stmt.finalizer || i + 1 !== iz) { + result = this.maybeBlockSuffix(guardedHandlers[i].body, result); + } + } + + // new interface + if (stmt.handler) { + if (Array.isArray(stmt.handler)) { + for (i = 0, iz = stmt.handler.length; i < iz; ++i) { + result = join(result, this.generateStatement(stmt.handler[i], S_TFFF)); + if (stmt.finalizer || i + 1 !== iz) { + result = this.maybeBlockSuffix(stmt.handler[i].body, result); + } + } + } else { + result = join(result, this.generateStatement(stmt.handler, S_TFFF)); + if (stmt.finalizer) { + result = this.maybeBlockSuffix(stmt.handler.body, result); + } + } + } + } + if (stmt.finalizer) { + result = join(result, ['finally', this.maybeBlock(stmt.finalizer, S_TFFF)]); + } + return result; + }, + + SwitchStatement: function (stmt, flags) { + var result, fragment, i, iz, bodyFlags, that = this; + withIndent(function () { + result = [ + 'switch' + space + '(', + that.generateExpression(stmt.discriminant, Precedence.Sequence, E_TTT), + ')' + space + '{' + newline + ]; + }); + if (stmt.cases) { + bodyFlags = S_TFFF; + for (i = 0, iz = stmt.cases.length; i < iz; ++i) { + if (i === iz - 1) { + bodyFlags |= F_SEMICOLON_OPT; + } + fragment = addIndent(this.generateStatement(stmt.cases[i], bodyFlags)); + result.push(fragment); + if (!endsWithLineTerminator(toSourceNodeWhenNeeded(fragment).toString())) { + result.push(newline); + } + } + } + result.push(addIndent('}')); + return result; + }, + + SwitchCase: function (stmt, flags) { + var result, fragment, i, iz, bodyFlags, that = this; + withIndent(function () { + if (stmt.test) { + result = [ + join('case', that.generateExpression(stmt.test, Precedence.Sequence, E_TTT)), + ':' + ]; + } else { + result = ['default:']; + } + + i = 0; + iz = stmt.consequent.length; + if (iz && stmt.consequent[0].type === Syntax.BlockStatement) { + fragment = that.maybeBlock(stmt.consequent[0], 
S_TFFF); + result.push(fragment); + i = 1; + } + + if (i !== iz && !endsWithLineTerminator(toSourceNodeWhenNeeded(result).toString())) { + result.push(newline); + } + + bodyFlags = S_TFFF; + for (; i < iz; ++i) { + if (i === iz - 1 && flags & F_SEMICOLON_OPT) { + bodyFlags |= F_SEMICOLON_OPT; + } + fragment = addIndent(that.generateStatement(stmt.consequent[i], bodyFlags)); + result.push(fragment); + if (i + 1 !== iz && !endsWithLineTerminator(toSourceNodeWhenNeeded(fragment).toString())) { + result.push(newline); + } + } + }); + return result; + }, + + IfStatement: function (stmt, flags) { + var result, bodyFlags, semicolonOptional, that = this; + withIndent(function () { + result = [ + 'if' + space + '(', + that.generateExpression(stmt.test, Precedence.Sequence, E_TTT), + ')' + ]; + }); + semicolonOptional = flags & F_SEMICOLON_OPT; + bodyFlags = S_TFFF; + if (semicolonOptional) { + bodyFlags |= F_SEMICOLON_OPT; + } + if (stmt.alternate) { + result.push(this.maybeBlock(stmt.consequent, S_TFFF)); + result = this.maybeBlockSuffix(stmt.consequent, result); + if (stmt.alternate.type === Syntax.IfStatement) { + result = join(result, ['else ', this.generateStatement(stmt.alternate, bodyFlags)]); + } else { + result = join(result, join('else', this.maybeBlock(stmt.alternate, bodyFlags))); + } + } else { + result.push(this.maybeBlock(stmt.consequent, bodyFlags)); + } + return result; + }, + + ForStatement: function (stmt, flags) { + var result, that = this; + withIndent(function () { + result = ['for' + space + '(']; + if (stmt.init) { + if (stmt.init.type === Syntax.VariableDeclaration) { + result.push(that.generateStatement(stmt.init, S_FFFF)); + } else { + // F_ALLOW_IN becomes false. + result.push(that.generateExpression(stmt.init, Precedence.Sequence, E_FTT)); + result.push(';'); + } + } else { + result.push(';'); + } + + if (stmt.test) { + result.push(space); + result.push(that.generateExpression(stmt.test, Precedence.Sequence, E_TTT)); + result.push(';'); + } else { + result.push(';'); + } + + if (stmt.update) { + result.push(space); + result.push(that.generateExpression(stmt.update, Precedence.Sequence, E_TTT)); + result.push(')'); + } else { + result.push(')'); + } + }); + + result.push(this.maybeBlock(stmt.body, flags & F_SEMICOLON_OPT ? S_TFFT : S_TFFF)); + return result; + }, + + ForInStatement: function (stmt, flags) { + return this.generateIterationForStatement('in', stmt, flags & F_SEMICOLON_OPT ? S_TFFT : S_TFFF); + }, + + ForOfStatement: function (stmt, flags) { + return this.generateIterationForStatement('of', stmt, flags & F_SEMICOLON_OPT ? S_TFFT : S_TFFF); + }, + + LabeledStatement: function (stmt, flags) { + return [stmt.label.name + ':', this.maybeBlock(stmt.body, flags & F_SEMICOLON_OPT ? S_TFFT : S_TFFF)]; + }, + + Program: function (stmt, flags) { + var result, fragment, i, iz, bodyFlags; + iz = stmt.body.length; + result = [safeConcatenation && iz > 0 ? 
'\n' : '']; + bodyFlags = S_TFTF; + for (i = 0; i < iz; ++i) { + if (!safeConcatenation && i === iz - 1) { + bodyFlags |= F_SEMICOLON_OPT; + } + + if (preserveBlankLines) { + // handle spaces before the first line + if (i === 0) { + if (!stmt.body[0].leadingComments) { + generateBlankLines(stmt.range[0], stmt.body[i].range[0], result); + } + } + + // handle spaces between lines + if (i > 0) { + if (!stmt.body[i - 1].trailingComments && !stmt.body[i].leadingComments) { + generateBlankLines(stmt.body[i - 1].range[1], stmt.body[i].range[0], result); + } + } + } + + fragment = addIndent(this.generateStatement(stmt.body[i], bodyFlags)); + result.push(fragment); + if (i + 1 < iz && !endsWithLineTerminator(toSourceNodeWhenNeeded(fragment).toString())) { + if (preserveBlankLines) { + if (!stmt.body[i + 1].leadingComments) { + result.push(newline); + } + } else { + result.push(newline); + } + } + + if (preserveBlankLines) { + // handle spaces after the last line + if (i === iz - 1) { + if (!stmt.body[i].trailingComments) { + generateBlankLines(stmt.body[i].range[1], stmt.range[1], result); + } + } + } + } + return result; + }, + + FunctionDeclaration: function (stmt, flags) { + return [ + generateAsyncPrefix(stmt, true), + 'function', + generateStarSuffix(stmt) || noEmptySpace(), + stmt.id ? generateIdentifier(stmt.id) : '', + this.generateFunctionBody(stmt) + ]; + }, + + ReturnStatement: function (stmt, flags) { + if (stmt.argument) { + return [join( + 'return', + this.generateExpression(stmt.argument, Precedence.Sequence, E_TTT) + ), this.semicolon(flags)]; + } + return ['return' + this.semicolon(flags)]; + }, + + WhileStatement: function (stmt, flags) { + var result, that = this; + withIndent(function () { + result = [ + 'while' + space + '(', + that.generateExpression(stmt.test, Precedence.Sequence, E_TTT), + ')' + ]; + }); + result.push(this.maybeBlock(stmt.body, flags & F_SEMICOLON_OPT ? S_TFFT : S_TFFF)); + return result; + }, + + WithStatement: function (stmt, flags) { + var result, that = this; + withIndent(function () { + result = [ + 'with' + space + '(', + that.generateExpression(stmt.object, Precedence.Sequence, E_TTT), + ')' + ]; + }); + result.push(this.maybeBlock(stmt.body, flags & F_SEMICOLON_OPT ? S_TFFT : S_TFFF)); + return result; + } + + }; + + merge(CodeGenerator.prototype, CodeGenerator.Statement); + + // Expressions. + + CodeGenerator.Expression = { + + SequenceExpression: function (expr, precedence, flags) { + var result, i, iz; + if (Precedence.Sequence < precedence) { + flags |= F_ALLOW_IN; + } + result = []; + for (i = 0, iz = expr.expressions.length; i < iz; ++i) { + result.push(this.generateExpression(expr.expressions[i], Precedence.Assignment, flags)); + if (i + 1 < iz) { + result.push(',' + space); + } + } + return parenthesize(result, Precedence.Sequence, precedence); + }, + + AssignmentExpression: function (expr, precedence, flags) { + return this.generateAssignment(expr.left, expr.right, expr.operator, precedence, flags); + }, + + ArrowFunctionExpression: function (expr, precedence, flags) { + return parenthesize(this.generateFunctionBody(expr), Precedence.ArrowFunction, precedence); + }, + + ConditionalExpression: function (expr, precedence, flags) { + if (Precedence.Conditional < precedence) { + flags |= F_ALLOW_IN; + } + return parenthesize( + [ + this.generateExpression(expr.test, Precedence.LogicalOR, flags), + space + '?' 
+ space, + this.generateExpression(expr.consequent, Precedence.Assignment, flags), + space + ':' + space, + this.generateExpression(expr.alternate, Precedence.Assignment, flags) + ], + Precedence.Conditional, + precedence + ); + }, + + LogicalExpression: function (expr, precedence, flags) { + return this.BinaryExpression(expr, precedence, flags); + }, + + BinaryExpression: function (expr, precedence, flags) { + var result, leftPrecedence, rightPrecedence, currentPrecedence, fragment, leftSource; + currentPrecedence = BinaryPrecedence[expr.operator]; + leftPrecedence = expr.operator === '**' ? Precedence.Postfix : currentPrecedence; + rightPrecedence = expr.operator === '**' ? currentPrecedence : currentPrecedence + 1; + + if (currentPrecedence < precedence) { + flags |= F_ALLOW_IN; + } + + fragment = this.generateExpression(expr.left, leftPrecedence, flags); + + leftSource = fragment.toString(); + + if (leftSource.charCodeAt(leftSource.length - 1) === 0x2F /* / */ && esutils.code.isIdentifierPartES5(expr.operator.charCodeAt(0))) { + result = [fragment, noEmptySpace(), expr.operator]; + } else { + result = join(fragment, expr.operator); + } + + fragment = this.generateExpression(expr.right, rightPrecedence, flags); + + if (expr.operator === '/' && fragment.toString().charAt(0) === '/' || + expr.operator.slice(-1) === '<' && fragment.toString().slice(0, 3) === '!--') { + // If '/' concats with '/' or `<` concats with `!--`, it is interpreted as comment start + result.push(noEmptySpace()); + result.push(fragment); + } else { + result = join(result, fragment); + } + + if (expr.operator === 'in' && !(flags & F_ALLOW_IN)) { + return ['(', result, ')']; + } + return parenthesize(result, currentPrecedence, precedence); + }, + + CallExpression: function (expr, precedence, flags) { + var result, i, iz; + // F_ALLOW_UNPARATH_NEW becomes false. + result = [this.generateExpression(expr.callee, Precedence.Call, E_TTF)]; + result.push('('); + for (i = 0, iz = expr['arguments'].length; i < iz; ++i) { + result.push(this.generateExpression(expr['arguments'][i], Precedence.Assignment, E_TTT)); + if (i + 1 < iz) { + result.push(',' + space); + } + } + result.push(')'); + + if (!(flags & F_ALLOW_CALL)) { + return ['(', result, ')']; + } + return parenthesize(result, Precedence.Call, precedence); + }, + + NewExpression: function (expr, precedence, flags) { + var result, length, i, iz, itemFlags; + length = expr['arguments'].length; + + // F_ALLOW_CALL becomes false. + // F_ALLOW_UNPARATH_NEW may become false. + itemFlags = (flags & F_ALLOW_UNPARATH_NEW && !parentheses && length === 0) ? E_TFT : E_TFF; + + result = join( + 'new', + this.generateExpression(expr.callee, Precedence.New, itemFlags) + ); + + if (!(flags & F_ALLOW_UNPARATH_NEW) || parentheses || length > 0) { + result.push('('); + for (i = 0, iz = length; i < iz; ++i) { + result.push(this.generateExpression(expr['arguments'][i], Precedence.Assignment, E_TTT)); + if (i + 1 < iz) { + result.push(',' + space); + } + } + result.push(')'); + } + + return parenthesize(result, Precedence.New, precedence); + }, + + MemberExpression: function (expr, precedence, flags) { + var result, fragment; + + // F_ALLOW_UNPARATH_NEW becomes false. + result = [this.generateExpression(expr.object, Precedence.Call, (flags & F_ALLOW_CALL) ? E_TTF : E_TFF)]; + + if (expr.computed) { + result.push('['); + result.push(this.generateExpression(expr.property, Precedence.Sequence, flags & F_ALLOW_CALL ? 
E_TTT : E_TFT)); + result.push(']'); + } else { + if (expr.object.type === Syntax.Literal && typeof expr.object.value === 'number') { + fragment = toSourceNodeWhenNeeded(result).toString(); + // When the following conditions are all true, + // 1. No floating point + // 2. Don't have exponents + // 3. The last character is a decimal digit + // 4. Not hexadecimal OR octal number literal + // we should add a floating point. + if ( + fragment.indexOf('.') < 0 && + !/[eExX]/.test(fragment) && + esutils.code.isDecimalDigit(fragment.charCodeAt(fragment.length - 1)) && + !(fragment.length >= 2 && fragment.charCodeAt(0) === 48) // '0' + ) { + result.push(' '); + } + } + result.push('.'); + result.push(generateIdentifier(expr.property)); + } + + return parenthesize(result, Precedence.Member, precedence); + }, + + MetaProperty: function (expr, precedence, flags) { + var result; + result = []; + result.push(typeof expr.meta === "string" ? expr.meta : generateIdentifier(expr.meta)); + result.push('.'); + result.push(typeof expr.property === "string" ? expr.property : generateIdentifier(expr.property)); + return parenthesize(result, Precedence.Member, precedence); + }, + + UnaryExpression: function (expr, precedence, flags) { + var result, fragment, rightCharCode, leftSource, leftCharCode; + fragment = this.generateExpression(expr.argument, Precedence.Unary, E_TTT); + + if (space === '') { + result = join(expr.operator, fragment); + } else { + result = [expr.operator]; + if (expr.operator.length > 2) { + // delete, void, typeof + // get `typeof []`, not `typeof[]` + result = join(result, fragment); + } else { + // Prevent inserting spaces between operator and argument if it is unnecessary + // like, `!cond` + leftSource = toSourceNodeWhenNeeded(result).toString(); + leftCharCode = leftSource.charCodeAt(leftSource.length - 1); + rightCharCode = fragment.toString().charCodeAt(0); + + if (((leftCharCode === 0x2B /* + */ || leftCharCode === 0x2D /* - */) && leftCharCode === rightCharCode) || + (esutils.code.isIdentifierPartES5(leftCharCode) && esutils.code.isIdentifierPartES5(rightCharCode))) { + result.push(noEmptySpace()); + result.push(fragment); + } else { + result.push(fragment); + } + } + } + return parenthesize(result, Precedence.Unary, precedence); + }, + + YieldExpression: function (expr, precedence, flags) { + var result; + if (expr.delegate) { + result = 'yield*'; + } else { + result = 'yield'; + } + if (expr.argument) { + result = join( + result, + this.generateExpression(expr.argument, Precedence.Yield, E_TTT) + ); + } + return parenthesize(result, Precedence.Yield, precedence); + }, + + AwaitExpression: function (expr, precedence, flags) { + var result = join( + expr.all ? 
'await*' : 'await', + this.generateExpression(expr.argument, Precedence.Await, E_TTT) + ); + return parenthesize(result, Precedence.Await, precedence); + }, + + UpdateExpression: function (expr, precedence, flags) { + if (expr.prefix) { + return parenthesize( + [ + expr.operator, + this.generateExpression(expr.argument, Precedence.Unary, E_TTT) + ], + Precedence.Unary, + precedence + ); + } + return parenthesize( + [ + this.generateExpression(expr.argument, Precedence.Postfix, E_TTT), + expr.operator + ], + Precedence.Postfix, + precedence + ); + }, + + FunctionExpression: function (expr, precedence, flags) { + var result = [ + generateAsyncPrefix(expr, true), + 'function' + ]; + if (expr.id) { + result.push(generateStarSuffix(expr) || noEmptySpace()); + result.push(generateIdentifier(expr.id)); + } else { + result.push(generateStarSuffix(expr) || space); + } + result.push(this.generateFunctionBody(expr)); + return result; + }, + + ArrayPattern: function (expr, precedence, flags) { + return this.ArrayExpression(expr, precedence, flags, true); + }, + + ArrayExpression: function (expr, precedence, flags, isPattern) { + var result, multiline, that = this; + if (!expr.elements.length) { + return '[]'; + } + multiline = isPattern ? false : expr.elements.length > 1; + result = ['[', multiline ? newline : '']; + withIndent(function (indent) { + var i, iz; + for (i = 0, iz = expr.elements.length; i < iz; ++i) { + if (!expr.elements[i]) { + if (multiline) { + result.push(indent); + } + if (i + 1 === iz) { + result.push(','); + } + } else { + result.push(multiline ? indent : ''); + result.push(that.generateExpression(expr.elements[i], Precedence.Assignment, E_TTT)); + } + if (i + 1 < iz) { + result.push(',' + (multiline ? newline : space)); + } + } + }); + if (multiline && !endsWithLineTerminator(toSourceNodeWhenNeeded(result).toString())) { + result.push(newline); + } + result.push(multiline ? base : ''); + result.push(']'); + return result; + }, + + RestElement: function(expr, precedence, flags) { + return '...' 
+ this.generatePattern(expr.argument); + }, + + ClassExpression: function (expr, precedence, flags) { + var result, fragment; + result = ['class']; + if (expr.id) { + result = join(result, this.generateExpression(expr.id, Precedence.Sequence, E_TTT)); + } + if (expr.superClass) { + fragment = join('extends', this.generateExpression(expr.superClass, Precedence.Unary, E_TTT)); + result = join(result, fragment); + } + result.push(space); + result.push(this.generateStatement(expr.body, S_TFFT)); + return result; + }, + + MethodDefinition: function (expr, precedence, flags) { + var result, fragment; + if (expr['static']) { + result = ['static' + space]; + } else { + result = []; + } + if (expr.kind === 'get' || expr.kind === 'set') { + fragment = [ + join(expr.kind, this.generatePropertyKey(expr.key, expr.computed)), + this.generateFunctionBody(expr.value) + ]; + } else { + fragment = [ + generateMethodPrefix(expr), + this.generatePropertyKey(expr.key, expr.computed), + this.generateFunctionBody(expr.value) + ]; + } + return join(result, fragment); + }, + + Property: function (expr, precedence, flags) { + if (expr.kind === 'get' || expr.kind === 'set') { + return [ + expr.kind, noEmptySpace(), + this.generatePropertyKey(expr.key, expr.computed), + this.generateFunctionBody(expr.value) + ]; + } + + if (expr.shorthand) { + if (expr.value.type === "AssignmentPattern") { + return this.AssignmentPattern(expr.value, Precedence.Sequence, E_TTT); + } + return this.generatePropertyKey(expr.key, expr.computed); + } + + if (expr.method) { + return [ + generateMethodPrefix(expr), + this.generatePropertyKey(expr.key, expr.computed), + this.generateFunctionBody(expr.value) + ]; + } + + return [ + this.generatePropertyKey(expr.key, expr.computed), + ':' + space, + this.generateExpression(expr.value, Precedence.Assignment, E_TTT) + ]; + }, + + ObjectExpression: function (expr, precedence, flags) { + var multiline, result, fragment, that = this; + + if (!expr.properties.length) { + return '{}'; + } + multiline = expr.properties.length > 1; + + withIndent(function () { + fragment = that.generateExpression(expr.properties[0], Precedence.Sequence, E_TTT); + }); + + if (!multiline) { + // issues 4 + // Do not transform from + // dejavu.Class.declare({ + // method2: function () {} + // }); + // to + // dejavu.Class.declare({method2: function () { + // }}); + if (!hasLineTerminator(toSourceNodeWhenNeeded(fragment).toString())) { + return [ '{', space, fragment, space, '}' ]; + } + } + + withIndent(function (indent) { + var i, iz; + result = [ '{', newline, indent, fragment ]; + + if (multiline) { + result.push(',' + newline); + for (i = 1, iz = expr.properties.length; i < iz; ++i) { + result.push(indent); + result.push(that.generateExpression(expr.properties[i], Precedence.Sequence, E_TTT)); + if (i + 1 < iz) { + result.push(',' + newline); + } + } + } + }); + + if (!endsWithLineTerminator(toSourceNodeWhenNeeded(result).toString())) { + result.push(newline); + } + result.push(base); + result.push('}'); + return result; + }, + + AssignmentPattern: function(expr, precedence, flags) { + return this.generateAssignment(expr.left, expr.right, '=', precedence, flags); + }, + + ObjectPattern: function (expr, precedence, flags) { + var result, i, iz, multiline, property, that = this; + if (!expr.properties.length) { + return '{}'; + } + + multiline = false; + if (expr.properties.length === 1) { + property = expr.properties[0]; + if ( + property.type === Syntax.Property + && property.value.type !== Syntax.Identifier + ) { + 
multiline = true; + } + } else { + for (i = 0, iz = expr.properties.length; i < iz; ++i) { + property = expr.properties[i]; + if ( + property.type === Syntax.Property + && !property.shorthand + ) { + multiline = true; + break; + } + } + } + result = ['{', multiline ? newline : '' ]; + + withIndent(function (indent) { + var i, iz; + for (i = 0, iz = expr.properties.length; i < iz; ++i) { + result.push(multiline ? indent : ''); + result.push(that.generateExpression(expr.properties[i], Precedence.Sequence, E_TTT)); + if (i + 1 < iz) { + result.push(',' + (multiline ? newline : space)); + } + } + }); + + if (multiline && !endsWithLineTerminator(toSourceNodeWhenNeeded(result).toString())) { + result.push(newline); + } + result.push(multiline ? base : ''); + result.push('}'); + return result; + }, + + ThisExpression: function (expr, precedence, flags) { + return 'this'; + }, + + Super: function (expr, precedence, flags) { + return 'super'; + }, + + Identifier: function (expr, precedence, flags) { + return generateIdentifier(expr); + }, + + ImportDefaultSpecifier: function (expr, precedence, flags) { + return generateIdentifier(expr.id || expr.local); + }, + + ImportNamespaceSpecifier: function (expr, precedence, flags) { + var result = ['*']; + var id = expr.id || expr.local; + if (id) { + result.push(space + 'as' + noEmptySpace() + generateIdentifier(id)); + } + return result; + }, + + ImportSpecifier: function (expr, precedence, flags) { + var imported = expr.imported; + var result = [ imported.name ]; + var local = expr.local; + if (local && local.name !== imported.name) { + result.push(noEmptySpace() + 'as' + noEmptySpace() + generateIdentifier(local)); + } + return result; + }, + + ExportSpecifier: function (expr, precedence, flags) { + var local = expr.local; + var result = [ local.name ]; + var exported = expr.exported; + if (exported && exported.name !== local.name) { + result.push(noEmptySpace() + 'as' + noEmptySpace() + generateIdentifier(exported)); + } + return result; + }, + + Literal: function (expr, precedence, flags) { + var raw; + if (expr.hasOwnProperty('raw') && parse && extra.raw) { + try { + raw = parse(expr.raw).body[0].expression; + if (raw.type === Syntax.Literal) { + if (raw.value === expr.value) { + return expr.raw; + } + } + } catch (e) { + // not use raw property + } + } + + if (expr.regex) { + return '/' + expr.regex.pattern + '/' + expr.regex.flags; + } + + if (expr.value === null) { + return 'null'; + } + + if (typeof expr.value === 'string') { + return escapeString(expr.value); + } + + if (typeof expr.value === 'number') { + return generateNumber(expr.value); + } + + if (typeof expr.value === 'boolean') { + return expr.value ? 'true' : 'false'; + } + + return generateRegExp(expr.value); + }, + + GeneratorExpression: function (expr, precedence, flags) { + return this.ComprehensionExpression(expr, precedence, flags); + }, + + ComprehensionExpression: function (expr, precedence, flags) { + // GeneratorExpression should be parenthesized with (...), ComprehensionExpression with [...] + // Due to https://bugzilla.mozilla.org/show_bug.cgi?id=883468 position of expr.body can differ in Spidermonkey and ES6 + + var result, i, iz, fragment, that = this; + result = (expr.type === Syntax.GeneratorExpression) ? 
['('] : ['[']; + + if (extra.moz.comprehensionExpressionStartsWithAssignment) { + fragment = this.generateExpression(expr.body, Precedence.Assignment, E_TTT); + result.push(fragment); + } + + if (expr.blocks) { + withIndent(function () { + for (i = 0, iz = expr.blocks.length; i < iz; ++i) { + fragment = that.generateExpression(expr.blocks[i], Precedence.Sequence, E_TTT); + if (i > 0 || extra.moz.comprehensionExpressionStartsWithAssignment) { + result = join(result, fragment); + } else { + result.push(fragment); + } + } + }); + } + + if (expr.filter) { + result = join(result, 'if' + space); + fragment = this.generateExpression(expr.filter, Precedence.Sequence, E_TTT); + result = join(result, [ '(', fragment, ')' ]); + } + + if (!extra.moz.comprehensionExpressionStartsWithAssignment) { + fragment = this.generateExpression(expr.body, Precedence.Assignment, E_TTT); + + result = join(result, fragment); + } + + result.push((expr.type === Syntax.GeneratorExpression) ? ')' : ']'); + return result; + }, + + ComprehensionBlock: function (expr, precedence, flags) { + var fragment; + if (expr.left.type === Syntax.VariableDeclaration) { + fragment = [ + expr.left.kind, noEmptySpace(), + this.generateStatement(expr.left.declarations[0], S_FFFF) + ]; + } else { + fragment = this.generateExpression(expr.left, Precedence.Call, E_TTT); + } + + fragment = join(fragment, expr.of ? 'of' : 'in'); + fragment = join(fragment, this.generateExpression(expr.right, Precedence.Sequence, E_TTT)); + + return [ 'for' + space + '(', fragment, ')' ]; + }, + + SpreadElement: function (expr, precedence, flags) { + return [ + '...', + this.generateExpression(expr.argument, Precedence.Assignment, E_TTT) + ]; + }, + + TaggedTemplateExpression: function (expr, precedence, flags) { + var itemFlags = E_TTF; + if (!(flags & F_ALLOW_CALL)) { + itemFlags = E_TFF; + } + var result = [ + this.generateExpression(expr.tag, Precedence.Call, itemFlags), + this.generateExpression(expr.quasi, Precedence.Primary, E_FFT) + ]; + return parenthesize(result, Precedence.TaggedTemplate, precedence); + }, + + TemplateElement: function (expr, precedence, flags) { + // Don't use "cooked". Since tagged template can use raw template + // representation. So if we do so, it breaks the script semantics. 
+ return expr.value.raw; + }, + + TemplateLiteral: function (expr, precedence, flags) { + var result, i, iz; + result = [ '`' ]; + for (i = 0, iz = expr.quasis.length; i < iz; ++i) { + result.push(this.generateExpression(expr.quasis[i], Precedence.Primary, E_TTT)); + if (i + 1 < iz) { + result.push('${' + space); + result.push(this.generateExpression(expr.expressions[i], Precedence.Sequence, E_TTT)); + result.push(space + '}'); + } + } + result.push('`'); + return result; + }, + + ModuleSpecifier: function (expr, precedence, flags) { + return this.Literal(expr, precedence, flags); + }, + + ImportExpression: function(expr, precedence, flag) { + return parenthesize([ + 'import(', + this.generateExpression(expr.source, Precedence.Assignment, E_TTT), + ')' + ], Precedence.Call, precedence); + }, + + }; + + merge(CodeGenerator.prototype, CodeGenerator.Expression); + + CodeGenerator.prototype.generateExpression = function (expr, precedence, flags) { + var result, type; + + type = expr.type || Syntax.Property; + + if (extra.verbatim && expr.hasOwnProperty(extra.verbatim)) { + return generateVerbatim(expr, precedence); + } + + result = this[type](expr, precedence, flags); + + + if (extra.comment) { + result = addComments(expr, result); + } + return toSourceNodeWhenNeeded(result, expr); + }; + + CodeGenerator.prototype.generateStatement = function (stmt, flags) { + var result, + fragment; + + result = this[stmt.type](stmt, flags); + + // Attach comments + + if (extra.comment) { + result = addComments(stmt, result); + } + + fragment = toSourceNodeWhenNeeded(result).toString(); + if (stmt.type === Syntax.Program && !safeConcatenation && newline === '' && fragment.charAt(fragment.length - 1) === '\n') { + result = sourceMap ? toSourceNodeWhenNeeded(result).replaceRight(/\s+$/, '') : fragment.replace(/\s+$/, ''); + } + + return toSourceNodeWhenNeeded(result, stmt); + }; + + function generateInternal(node) { + var codegen; + + codegen = new CodeGenerator(); + if (isStatement(node)) { + return codegen.generateStatement(node, S_TFFF); + } + + if (isExpression(node)) { + return codegen.generateExpression(node, Precedence.Sequence, E_TTT); + } + + throw new Error('Unknown node type: ' + node.type); + } + + function generate(node, options) { + var defaultOptions = getDefaultOptions(), result, pair; + + if (options != null) { + // Obsolete options + // + // `options.indent` + // `options.base` + // + // Instead of them, we can use `option.format.indent`. + if (typeof options.indent === 'string') { + defaultOptions.format.indent.style = options.indent; + } + if (typeof options.base === 'number') { + defaultOptions.format.indent.base = options.base; + } + options = updateDeeply(defaultOptions, options); + indent = options.format.indent.style; + if (typeof options.base === 'string') { + base = options.base; + } else { + base = stringRepeat(indent, options.format.indent.base); + } + } else { + options = defaultOptions; + indent = options.format.indent.style; + base = stringRepeat(indent, options.format.indent.base); + } + json = options.format.json; + renumber = options.format.renumber; + hexadecimal = json ? false : options.format.hexadecimal; + quotes = json ? 
'double' : options.format.quotes; + escapeless = options.format.escapeless; + newline = options.format.newline; + space = options.format.space; + if (options.format.compact) { + newline = space = indent = base = ''; + } + parentheses = options.format.parentheses; + semicolons = options.format.semicolons; + safeConcatenation = options.format.safeConcatenation; + directive = options.directive; + parse = json ? null : options.parse; + sourceMap = options.sourceMap; + sourceCode = options.sourceCode; + preserveBlankLines = options.format.preserveBlankLines && sourceCode !== null; + extra = options; + + if (sourceMap) { + if (!exports.browser) { + // We assume environment is node.js + // And prevent from including source-map by browserify + SourceNode = (__webpack_require__(/*! source-map */ "../../../.yarn/berry/cache/source-map-npm-0.6.1-1a3621db16-9.zip/node_modules/source-map/source-map.js").SourceNode); + } else { + SourceNode = global.sourceMap.SourceNode; + } + } + + result = generateInternal(node); + + if (!sourceMap) { + pair = {code: result.toString(), map: null}; + return options.sourceMapWithCode ? pair : pair.code; + } + + + pair = result.toStringWithSourceMap({ + file: options.file, + sourceRoot: options.sourceMapRoot + }); + + if (options.sourceContent) { + pair.map.setSourceContent(options.sourceMap, + options.sourceContent); + } + + if (options.sourceMapWithCode) { + return pair; + } + + return pair.map.toString(); + } + + FORMAT_MINIFY = { + indent: { + style: '', + base: 0 + }, + renumber: true, + hexadecimal: true, + quotes: 'auto', + escapeless: true, + compact: true, + parentheses: false, + semicolons: false + }; + + FORMAT_DEFAULTS = getDefaultOptions().format; + + exports.version = __webpack_require__(/*! ./package.json */ "../../../.yarn/berry/cache/escodegen-npm-1.14.3-a4dedc6eeb-9.zip/node_modules/escodegen/package.json").version; + exports.generate = generate; + exports.attachComments = estraverse.attachComments; + exports.Precedence = updateDeeply({}, Precedence); + exports.browser = false; + exports.FORMAT_MINIFY = FORMAT_MINIFY; + exports.FORMAT_DEFAULTS = FORMAT_DEFAULTS; +}()); +/* vim: set sw=4 ts=4 et tw=80 : */ + + +/***/ }), + +/***/ "../../../.yarn/berry/cache/esprima-npm-4.0.1-1084e98778-9.zip/node_modules/esprima/dist/esprima.js": +/*!**********************************************************************************************************!*\ + !*** ../../../.yarn/berry/cache/esprima-npm-4.0.1-1084e98778-9.zip/node_modules/esprima/dist/esprima.js ***! 
+ \**********************************************************************************************************/ +/***/ (function(module) { + +(function webpackUniversalModuleDefinition(root, factory) { +/* istanbul ignore next */ + if(true) + module.exports = factory(); + else {} +})(this, function() { +return /******/ (function(modules) { // webpackBootstrap +/******/ // The module cache +/******/ var installedModules = {}; + +/******/ // The require function +/******/ function __nested_webpack_require_583__(moduleId) { + +/******/ // Check if module is in cache +/* istanbul ignore if */ +/******/ if(installedModules[moduleId]) +/******/ return installedModules[moduleId].exports; + +/******/ // Create a new module (and put it into the cache) +/******/ var module = installedModules[moduleId] = { +/******/ exports: {}, +/******/ id: moduleId, +/******/ loaded: false +/******/ }; + +/******/ // Execute the module function +/******/ modules[moduleId].call(module.exports, module, module.exports, __nested_webpack_require_583__); + +/******/ // Flag the module as loaded +/******/ module.loaded = true; + +/******/ // Return the exports of the module +/******/ return module.exports; +/******/ } + + +/******/ // expose the modules object (__webpack_modules__) +/******/ __nested_webpack_require_583__.m = modules; + +/******/ // expose the module cache +/******/ __nested_webpack_require_583__.c = installedModules; + +/******/ // __webpack_public_path__ +/******/ __nested_webpack_require_583__.p = ""; + +/******/ // Load entry module and return exports +/******/ return __nested_webpack_require_583__(0); +/******/ }) +/************************************************************************/ +/******/ ([ +/* 0 */ +/***/ function(module, exports, __nested_webpack_require_1808__) { + + "use strict"; + /* + Copyright JS Foundation and other contributors, https://js.foundation/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + Object.defineProperty(exports, "__esModule", { value: true }); + var comment_handler_1 = __nested_webpack_require_1808__(1); + var jsx_parser_1 = __nested_webpack_require_1808__(3); + var parser_1 = __nested_webpack_require_1808__(8); + var tokenizer_1 = __nested_webpack_require_1808__(15); + function parse(code, options, delegate) { + var commentHandler = null; + var proxyDelegate = function (node, metadata) { + if (delegate) { + delegate(node, metadata); + } + if (commentHandler) { + commentHandler.visit(node, metadata); + } + }; + var parserDelegate = (typeof delegate === 'function') ? proxyDelegate : null; + var collectComment = false; + if (options) { + collectComment = (typeof options.comment === 'boolean' && options.comment); + var attachComment = (typeof options.attachComment === 'boolean' && options.attachComment); + if (collectComment || attachComment) { + commentHandler = new comment_handler_1.CommentHandler(); + commentHandler.attach = attachComment; + options.comment = true; + parserDelegate = proxyDelegate; + } + } + var isModule = false; + if (options && typeof options.sourceType === 'string') { + isModule = (options.sourceType === 'module'); + } + var parser; + if (options && typeof options.jsx === 'boolean' && options.jsx) { + parser = new jsx_parser_1.JSXParser(code, options, parserDelegate); + } + else { + parser = new parser_1.Parser(code, options, parserDelegate); + } + var program = isModule ? parser.parseModule() : parser.parseScript(); + var ast = program; + if (collectComment && commentHandler) { + ast.comments = commentHandler.comments; + } + if (parser.config.tokens) { + ast.tokens = parser.tokens; + } + if (parser.config.tolerant) { + ast.errors = parser.errorHandler.errors; + } + return ast; + } + exports.parse = parse; + function parseModule(code, options, delegate) { + var parsingOptions = options || {}; + parsingOptions.sourceType = 'module'; + return parse(code, parsingOptions, delegate); + } + exports.parseModule = parseModule; + function parseScript(code, options, delegate) { + var parsingOptions = options || {}; + parsingOptions.sourceType = 'script'; + return parse(code, parsingOptions, delegate); + } + exports.parseScript = parseScript; + function tokenize(code, options, delegate) { + var tokenizer = new tokenizer_1.Tokenizer(code, options); + var tokens; + tokens = []; + try { + while (true) { + var token = tokenizer.getNextToken(); + if (!token) { + break; + } + if (delegate) { + token = delegate(token); + } + tokens.push(token); + } + } + catch (e) { + tokenizer.errorHandler.tolerate(e); + } + if (tokenizer.errorHandler.tolerant) { + tokens.errors = tokenizer.errors(); + } + return tokens; + } + exports.tokenize = tokenize; + var syntax_1 = __nested_webpack_require_1808__(2); + exports.Syntax = syntax_1.Syntax; + // Sync with *.json manifests. 
+ exports.version = '4.0.1'; + + +/***/ }, +/* 1 */ +/***/ function(module, exports, __nested_webpack_require_6456__) { + + "use strict"; + Object.defineProperty(exports, "__esModule", { value: true }); + var syntax_1 = __nested_webpack_require_6456__(2); + var CommentHandler = (function () { + function CommentHandler() { + this.attach = false; + this.comments = []; + this.stack = []; + this.leading = []; + this.trailing = []; + } + CommentHandler.prototype.insertInnerComments = function (node, metadata) { + // innnerComments for properties empty block + // `function a() {/** comments **\/}` + if (node.type === syntax_1.Syntax.BlockStatement && node.body.length === 0) { + var innerComments = []; + for (var i = this.leading.length - 1; i >= 0; --i) { + var entry = this.leading[i]; + if (metadata.end.offset >= entry.start) { + innerComments.unshift(entry.comment); + this.leading.splice(i, 1); + this.trailing.splice(i, 1); + } + } + if (innerComments.length) { + node.innerComments = innerComments; + } + } + }; + CommentHandler.prototype.findTrailingComments = function (metadata) { + var trailingComments = []; + if (this.trailing.length > 0) { + for (var i = this.trailing.length - 1; i >= 0; --i) { + var entry_1 = this.trailing[i]; + if (entry_1.start >= metadata.end.offset) { + trailingComments.unshift(entry_1.comment); + } + } + this.trailing.length = 0; + return trailingComments; + } + var entry = this.stack[this.stack.length - 1]; + if (entry && entry.node.trailingComments) { + var firstComment = entry.node.trailingComments[0]; + if (firstComment && firstComment.range[0] >= metadata.end.offset) { + trailingComments = entry.node.trailingComments; + delete entry.node.trailingComments; + } + } + return trailingComments; + }; + CommentHandler.prototype.findLeadingComments = function (metadata) { + var leadingComments = []; + var target; + while (this.stack.length > 0) { + var entry = this.stack[this.stack.length - 1]; + if (entry && entry.start >= metadata.start.offset) { + target = entry.node; + this.stack.pop(); + } + else { + break; + } + } + if (target) { + var count = target.leadingComments ? target.leadingComments.length : 0; + for (var i = count - 1; i >= 0; --i) { + var comment = target.leadingComments[i]; + if (comment.range[1] <= metadata.start.offset) { + leadingComments.unshift(comment); + target.leadingComments.splice(i, 1); + } + } + if (target.leadingComments && target.leadingComments.length === 0) { + delete target.leadingComments; + } + return leadingComments; + } + for (var i = this.leading.length - 1; i >= 0; --i) { + var entry = this.leading[i]; + if (entry.start <= metadata.start.offset) { + leadingComments.unshift(entry.comment); + this.leading.splice(i, 1); + } + } + return leadingComments; + }; + CommentHandler.prototype.visitNode = function (node, metadata) { + if (node.type === syntax_1.Syntax.Program && node.body.length > 0) { + return; + } + this.insertInnerComments(node, metadata); + var trailingComments = this.findTrailingComments(metadata); + var leadingComments = this.findLeadingComments(metadata); + if (leadingComments.length > 0) { + node.leadingComments = leadingComments; + } + if (trailingComments.length > 0) { + node.trailingComments = trailingComments; + } + this.stack.push({ + node: node, + start: metadata.start.offset + }); + }; + CommentHandler.prototype.visitComment = function (node, metadata) { + var type = (node.type[0] === 'L') ? 
'Line' : 'Block'; + var comment = { + type: type, + value: node.value + }; + if (node.range) { + comment.range = node.range; + } + if (node.loc) { + comment.loc = node.loc; + } + this.comments.push(comment); + if (this.attach) { + var entry = { + comment: { + type: type, + value: node.value, + range: [metadata.start.offset, metadata.end.offset] + }, + start: metadata.start.offset + }; + if (node.loc) { + entry.comment.loc = node.loc; + } + node.type = type; + this.leading.push(entry); + this.trailing.push(entry); + } + }; + CommentHandler.prototype.visit = function (node, metadata) { + if (node.type === 'LineComment') { + this.visitComment(node, metadata); + } + else if (node.type === 'BlockComment') { + this.visitComment(node, metadata); + } + else if (this.attach) { + this.visitNode(node, metadata); + } + }; + return CommentHandler; + }()); + exports.CommentHandler = CommentHandler; + + +/***/ }, +/* 2 */ +/***/ function(module, exports) { + + "use strict"; + Object.defineProperty(exports, "__esModule", { value: true }); + exports.Syntax = { + AssignmentExpression: 'AssignmentExpression', + AssignmentPattern: 'AssignmentPattern', + ArrayExpression: 'ArrayExpression', + ArrayPattern: 'ArrayPattern', + ArrowFunctionExpression: 'ArrowFunctionExpression', + AwaitExpression: 'AwaitExpression', + BlockStatement: 'BlockStatement', + BinaryExpression: 'BinaryExpression', + BreakStatement: 'BreakStatement', + CallExpression: 'CallExpression', + CatchClause: 'CatchClause', + ClassBody: 'ClassBody', + ClassDeclaration: 'ClassDeclaration', + ClassExpression: 'ClassExpression', + ConditionalExpression: 'ConditionalExpression', + ContinueStatement: 'ContinueStatement', + DoWhileStatement: 'DoWhileStatement', + DebuggerStatement: 'DebuggerStatement', + EmptyStatement: 'EmptyStatement', + ExportAllDeclaration: 'ExportAllDeclaration', + ExportDefaultDeclaration: 'ExportDefaultDeclaration', + ExportNamedDeclaration: 'ExportNamedDeclaration', + ExportSpecifier: 'ExportSpecifier', + ExpressionStatement: 'ExpressionStatement', + ForStatement: 'ForStatement', + ForOfStatement: 'ForOfStatement', + ForInStatement: 'ForInStatement', + FunctionDeclaration: 'FunctionDeclaration', + FunctionExpression: 'FunctionExpression', + Identifier: 'Identifier', + IfStatement: 'IfStatement', + ImportDeclaration: 'ImportDeclaration', + ImportDefaultSpecifier: 'ImportDefaultSpecifier', + ImportNamespaceSpecifier: 'ImportNamespaceSpecifier', + ImportSpecifier: 'ImportSpecifier', + Literal: 'Literal', + LabeledStatement: 'LabeledStatement', + LogicalExpression: 'LogicalExpression', + MemberExpression: 'MemberExpression', + MetaProperty: 'MetaProperty', + MethodDefinition: 'MethodDefinition', + NewExpression: 'NewExpression', + ObjectExpression: 'ObjectExpression', + ObjectPattern: 'ObjectPattern', + Program: 'Program', + Property: 'Property', + RestElement: 'RestElement', + ReturnStatement: 'ReturnStatement', + SequenceExpression: 'SequenceExpression', + SpreadElement: 'SpreadElement', + Super: 'Super', + SwitchCase: 'SwitchCase', + SwitchStatement: 'SwitchStatement', + TaggedTemplateExpression: 'TaggedTemplateExpression', + TemplateElement: 'TemplateElement', + TemplateLiteral: 'TemplateLiteral', + ThisExpression: 'ThisExpression', + ThrowStatement: 'ThrowStatement', + TryStatement: 'TryStatement', + UnaryExpression: 'UnaryExpression', + UpdateExpression: 'UpdateExpression', + VariableDeclaration: 'VariableDeclaration', + VariableDeclarator: 'VariableDeclarator', + WhileStatement: 'WhileStatement', + WithStatement: 
'WithStatement', + YieldExpression: 'YieldExpression' + }; + + +/***/ }, +/* 3 */ +/***/ function(module, exports, __nested_webpack_require_15019__) { + + "use strict"; +/* istanbul ignore next */ + var __extends = (this && this.__extends) || (function () { + var extendStatics = Object.setPrototypeOf || + ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || + function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; }; + return function (d, b) { + extendStatics(d, b); + function __() { this.constructor = d; } + d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); + }; + })(); + Object.defineProperty(exports, "__esModule", { value: true }); + var character_1 = __nested_webpack_require_15019__(4); + var JSXNode = __nested_webpack_require_15019__(5); + var jsx_syntax_1 = __nested_webpack_require_15019__(6); + var Node = __nested_webpack_require_15019__(7); + var parser_1 = __nested_webpack_require_15019__(8); + var token_1 = __nested_webpack_require_15019__(13); + var xhtml_entities_1 = __nested_webpack_require_15019__(14); + token_1.TokenName[100 /* Identifier */] = 'JSXIdentifier'; + token_1.TokenName[101 /* Text */] = 'JSXText'; + // Fully qualified element name, e.g. returns "svg:path" + function getQualifiedElementName(elementName) { + var qualifiedName; + switch (elementName.type) { + case jsx_syntax_1.JSXSyntax.JSXIdentifier: + var id = elementName; + qualifiedName = id.name; + break; + case jsx_syntax_1.JSXSyntax.JSXNamespacedName: + var ns = elementName; + qualifiedName = getQualifiedElementName(ns.namespace) + ':' + + getQualifiedElementName(ns.name); + break; + case jsx_syntax_1.JSXSyntax.JSXMemberExpression: + var expr = elementName; + qualifiedName = getQualifiedElementName(expr.object) + '.' + + getQualifiedElementName(expr.property); + break; + /* istanbul ignore next */ + default: + break; + } + return qualifiedName; + } + var JSXParser = (function (_super) { + __extends(JSXParser, _super); + function JSXParser(code, options, delegate) { + return _super.call(this, code, options, delegate) || this; + } + JSXParser.prototype.parsePrimaryExpression = function () { + return this.match('<') ? this.parseJSXRoot() : _super.prototype.parsePrimaryExpression.call(this); + }; + JSXParser.prototype.startJSX = function () { + // Unwind the scanner before the lookahead token. + this.scanner.index = this.startMarker.index; + this.scanner.lineNumber = this.startMarker.line; + this.scanner.lineStart = this.startMarker.index - this.startMarker.column; + }; + JSXParser.prototype.finishJSX = function () { + // Prime the next lookahead. + this.nextToken(); + }; + JSXParser.prototype.reenterJSX = function () { + this.startJSX(); + this.expectJSX('}'); + // Pop the closing '}' added from the lookahead. 
+ if (this.config.tokens) { + this.tokens.pop(); + } + }; + JSXParser.prototype.createJSXNode = function () { + this.collectComments(); + return { + index: this.scanner.index, + line: this.scanner.lineNumber, + column: this.scanner.index - this.scanner.lineStart + }; + }; + JSXParser.prototype.createJSXChildNode = function () { + return { + index: this.scanner.index, + line: this.scanner.lineNumber, + column: this.scanner.index - this.scanner.lineStart + }; + }; + JSXParser.prototype.scanXHTMLEntity = function (quote) { + var result = '&'; + var valid = true; + var terminated = false; + var numeric = false; + var hex = false; + while (!this.scanner.eof() && valid && !terminated) { + var ch = this.scanner.source[this.scanner.index]; + if (ch === quote) { + break; + } + terminated = (ch === ';'); + result += ch; + ++this.scanner.index; + if (!terminated) { + switch (result.length) { + case 2: + // e.g. '{' + numeric = (ch === '#'); + break; + case 3: + if (numeric) { + // e.g. 'A' + hex = (ch === 'x'); + valid = hex || character_1.Character.isDecimalDigit(ch.charCodeAt(0)); + numeric = numeric && !hex; + } + break; + default: + valid = valid && !(numeric && !character_1.Character.isDecimalDigit(ch.charCodeAt(0))); + valid = valid && !(hex && !character_1.Character.isHexDigit(ch.charCodeAt(0))); + break; + } + } + } + if (valid && terminated && result.length > 2) { + // e.g. 'A' becomes just '#x41' + var str = result.substr(1, result.length - 2); + if (numeric && str.length > 1) { + result = String.fromCharCode(parseInt(str.substr(1), 10)); + } + else if (hex && str.length > 2) { + result = String.fromCharCode(parseInt('0' + str.substr(1), 16)); + } + else if (!numeric && !hex && xhtml_entities_1.XHTMLEntities[str]) { + result = xhtml_entities_1.XHTMLEntities[str]; + } + } + return result; + }; + // Scan the next JSX token. This replaces Scanner#lex when in JSX mode. + JSXParser.prototype.lexJSX = function () { + var cp = this.scanner.source.charCodeAt(this.scanner.index); + // < > / : = { } + if (cp === 60 || cp === 62 || cp === 47 || cp === 58 || cp === 61 || cp === 123 || cp === 125) { + var value = this.scanner.source[this.scanner.index++]; + return { + type: 7 /* Punctuator */, + value: value, + lineNumber: this.scanner.lineNumber, + lineStart: this.scanner.lineStart, + start: this.scanner.index - 1, + end: this.scanner.index + }; + } + // " ' + if (cp === 34 || cp === 39) { + var start = this.scanner.index; + var quote = this.scanner.source[this.scanner.index++]; + var str = ''; + while (!this.scanner.eof()) { + var ch = this.scanner.source[this.scanner.index++]; + if (ch === quote) { + break; + } + else if (ch === '&') { + str += this.scanXHTMLEntity(quote); + } + else { + str += ch; + } + } + return { + type: 8 /* StringLiteral */, + value: str, + lineNumber: this.scanner.lineNumber, + lineStart: this.scanner.lineStart, + start: start, + end: this.scanner.index + }; + } + // ... or . + if (cp === 46) { + var n1 = this.scanner.source.charCodeAt(this.scanner.index + 1); + var n2 = this.scanner.source.charCodeAt(this.scanner.index + 2); + var value = (n1 === 46 && n2 === 46) ? '...' : '.'; + var start = this.scanner.index; + this.scanner.index += value.length; + return { + type: 7 /* Punctuator */, + value: value, + lineNumber: this.scanner.lineNumber, + lineStart: this.scanner.lineStart, + start: start, + end: this.scanner.index + }; + } + // ` + if (cp === 96) { + // Only placeholder, since it will be rescanned as a real assignment expression. 
+ return { + type: 10 /* Template */, + value: '', + lineNumber: this.scanner.lineNumber, + lineStart: this.scanner.lineStart, + start: this.scanner.index, + end: this.scanner.index + }; + } + // Identifer can not contain backslash (char code 92). + if (character_1.Character.isIdentifierStart(cp) && (cp !== 92)) { + var start = this.scanner.index; + ++this.scanner.index; + while (!this.scanner.eof()) { + var ch = this.scanner.source.charCodeAt(this.scanner.index); + if (character_1.Character.isIdentifierPart(ch) && (ch !== 92)) { + ++this.scanner.index; + } + else if (ch === 45) { + // Hyphen (char code 45) can be part of an identifier. + ++this.scanner.index; + } + else { + break; + } + } + var id = this.scanner.source.slice(start, this.scanner.index); + return { + type: 100 /* Identifier */, + value: id, + lineNumber: this.scanner.lineNumber, + lineStart: this.scanner.lineStart, + start: start, + end: this.scanner.index + }; + } + return this.scanner.lex(); + }; + JSXParser.prototype.nextJSXToken = function () { + this.collectComments(); + this.startMarker.index = this.scanner.index; + this.startMarker.line = this.scanner.lineNumber; + this.startMarker.column = this.scanner.index - this.scanner.lineStart; + var token = this.lexJSX(); + this.lastMarker.index = this.scanner.index; + this.lastMarker.line = this.scanner.lineNumber; + this.lastMarker.column = this.scanner.index - this.scanner.lineStart; + if (this.config.tokens) { + this.tokens.push(this.convertToken(token)); + } + return token; + }; + JSXParser.prototype.nextJSXText = function () { + this.startMarker.index = this.scanner.index; + this.startMarker.line = this.scanner.lineNumber; + this.startMarker.column = this.scanner.index - this.scanner.lineStart; + var start = this.scanner.index; + var text = ''; + while (!this.scanner.eof()) { + var ch = this.scanner.source[this.scanner.index]; + if (ch === '{' || ch === '<') { + break; + } + ++this.scanner.index; + text += ch; + if (character_1.Character.isLineTerminator(ch.charCodeAt(0))) { + ++this.scanner.lineNumber; + if (ch === '\r' && this.scanner.source[this.scanner.index] === '\n') { + ++this.scanner.index; + } + this.scanner.lineStart = this.scanner.index; + } + } + this.lastMarker.index = this.scanner.index; + this.lastMarker.line = this.scanner.lineNumber; + this.lastMarker.column = this.scanner.index - this.scanner.lineStart; + var token = { + type: 101 /* Text */, + value: text, + lineNumber: this.scanner.lineNumber, + lineStart: this.scanner.lineStart, + start: start, + end: this.scanner.index + }; + if ((text.length > 0) && this.config.tokens) { + this.tokens.push(this.convertToken(token)); + } + return token; + }; + JSXParser.prototype.peekJSXToken = function () { + var state = this.scanner.saveState(); + this.scanner.scanComments(); + var next = this.lexJSX(); + this.scanner.restoreState(state); + return next; + }; + // Expect the next JSX token to match the specified punctuator. + // If not, an exception will be thrown. + JSXParser.prototype.expectJSX = function (value) { + var token = this.nextJSXToken(); + if (token.type !== 7 /* Punctuator */ || token.value !== value) { + this.throwUnexpectedToken(token); + } + }; + // Return true if the next JSX token matches the specified punctuator. 
+ JSXParser.prototype.matchJSX = function (value) { + var next = this.peekJSXToken(); + return next.type === 7 /* Punctuator */ && next.value === value; + }; + JSXParser.prototype.parseJSXIdentifier = function () { + var node = this.createJSXNode(); + var token = this.nextJSXToken(); + if (token.type !== 100 /* Identifier */) { + this.throwUnexpectedToken(token); + } + return this.finalize(node, new JSXNode.JSXIdentifier(token.value)); + }; + JSXParser.prototype.parseJSXElementName = function () { + var node = this.createJSXNode(); + var elementName = this.parseJSXIdentifier(); + if (this.matchJSX(':')) { + var namespace = elementName; + this.expectJSX(':'); + var name_1 = this.parseJSXIdentifier(); + elementName = this.finalize(node, new JSXNode.JSXNamespacedName(namespace, name_1)); + } + else if (this.matchJSX('.')) { + while (this.matchJSX('.')) { + var object = elementName; + this.expectJSX('.'); + var property = this.parseJSXIdentifier(); + elementName = this.finalize(node, new JSXNode.JSXMemberExpression(object, property)); + } + } + return elementName; + }; + JSXParser.prototype.parseJSXAttributeName = function () { + var node = this.createJSXNode(); + var attributeName; + var identifier = this.parseJSXIdentifier(); + if (this.matchJSX(':')) { + var namespace = identifier; + this.expectJSX(':'); + var name_2 = this.parseJSXIdentifier(); + attributeName = this.finalize(node, new JSXNode.JSXNamespacedName(namespace, name_2)); + } + else { + attributeName = identifier; + } + return attributeName; + }; + JSXParser.prototype.parseJSXStringLiteralAttribute = function () { + var node = this.createJSXNode(); + var token = this.nextJSXToken(); + if (token.type !== 8 /* StringLiteral */) { + this.throwUnexpectedToken(token); + } + var raw = this.getTokenRaw(token); + return this.finalize(node, new Node.Literal(token.value, raw)); + }; + JSXParser.prototype.parseJSXExpressionAttribute = function () { + var node = this.createJSXNode(); + this.expectJSX('{'); + this.finishJSX(); + if (this.match('}')) { + this.tolerateError('JSX attributes must only be assigned a non-empty expression'); + } + var expression = this.parseAssignmentExpression(); + this.reenterJSX(); + return this.finalize(node, new JSXNode.JSXExpressionContainer(expression)); + }; + JSXParser.prototype.parseJSXAttributeValue = function () { + return this.matchJSX('{') ? this.parseJSXExpressionAttribute() : + this.matchJSX('<') ? this.parseJSXElement() : this.parseJSXStringLiteralAttribute(); + }; + JSXParser.prototype.parseJSXNameValueAttribute = function () { + var node = this.createJSXNode(); + var name = this.parseJSXAttributeName(); + var value = null; + if (this.matchJSX('=')) { + this.expectJSX('='); + value = this.parseJSXAttributeValue(); + } + return this.finalize(node, new JSXNode.JSXAttribute(name, value)); + }; + JSXParser.prototype.parseJSXSpreadAttribute = function () { + var node = this.createJSXNode(); + this.expectJSX('{'); + this.expectJSX('...'); + this.finishJSX(); + var argument = this.parseAssignmentExpression(); + this.reenterJSX(); + return this.finalize(node, new JSXNode.JSXSpreadAttribute(argument)); + }; + JSXParser.prototype.parseJSXAttributes = function () { + var attributes = []; + while (!this.matchJSX('/') && !this.matchJSX('>')) { + var attribute = this.matchJSX('{') ? 
this.parseJSXSpreadAttribute() : + this.parseJSXNameValueAttribute(); + attributes.push(attribute); + } + return attributes; + }; + JSXParser.prototype.parseJSXOpeningElement = function () { + var node = this.createJSXNode(); + this.expectJSX('<'); + var name = this.parseJSXElementName(); + var attributes = this.parseJSXAttributes(); + var selfClosing = this.matchJSX('/'); + if (selfClosing) { + this.expectJSX('/'); + } + this.expectJSX('>'); + return this.finalize(node, new JSXNode.JSXOpeningElement(name, selfClosing, attributes)); + }; + JSXParser.prototype.parseJSXBoundaryElement = function () { + var node = this.createJSXNode(); + this.expectJSX('<'); + if (this.matchJSX('/')) { + this.expectJSX('/'); + var name_3 = this.parseJSXElementName(); + this.expectJSX('>'); + return this.finalize(node, new JSXNode.JSXClosingElement(name_3)); + } + var name = this.parseJSXElementName(); + var attributes = this.parseJSXAttributes(); + var selfClosing = this.matchJSX('/'); + if (selfClosing) { + this.expectJSX('/'); + } + this.expectJSX('>'); + return this.finalize(node, new JSXNode.JSXOpeningElement(name, selfClosing, attributes)); + }; + JSXParser.prototype.parseJSXEmptyExpression = function () { + var node = this.createJSXChildNode(); + this.collectComments(); + this.lastMarker.index = this.scanner.index; + this.lastMarker.line = this.scanner.lineNumber; + this.lastMarker.column = this.scanner.index - this.scanner.lineStart; + return this.finalize(node, new JSXNode.JSXEmptyExpression()); + }; + JSXParser.prototype.parseJSXExpressionContainer = function () { + var node = this.createJSXNode(); + this.expectJSX('{'); + var expression; + if (this.matchJSX('}')) { + expression = this.parseJSXEmptyExpression(); + this.expectJSX('}'); + } + else { + this.finishJSX(); + expression = this.parseAssignmentExpression(); + this.reenterJSX(); + } + return this.finalize(node, new JSXNode.JSXExpressionContainer(expression)); + }; + JSXParser.prototype.parseJSXChildren = function () { + var children = []; + while (!this.scanner.eof()) { + var node = this.createJSXChildNode(); + var token = this.nextJSXText(); + if (token.start < token.end) { + var raw = this.getTokenRaw(token); + var child = this.finalize(node, new JSXNode.JSXText(token.value, raw)); + children.push(child); + } + if (this.scanner.source[this.scanner.index] === '{') { + var container = this.parseJSXExpressionContainer(); + children.push(container); + } + else { + break; + } + } + return children; + }; + JSXParser.prototype.parseComplexJSXElement = function (el) { + var stack = []; + while (!this.scanner.eof()) { + el.children = el.children.concat(this.parseJSXChildren()); + var node = this.createJSXChildNode(); + var element = this.parseJSXBoundaryElement(); + if (element.type === jsx_syntax_1.JSXSyntax.JSXOpeningElement) { + var opening = element; + if (opening.selfClosing) { + var child = this.finalize(node, new JSXNode.JSXElement(opening, [], null)); + el.children.push(child); + } + else { + stack.push(el); + el = { node: node, opening: opening, closing: null, children: [] }; + } + } + if (element.type === jsx_syntax_1.JSXSyntax.JSXClosingElement) { + el.closing = element; + var open_1 = getQualifiedElementName(el.opening.name); + var close_1 = getQualifiedElementName(el.closing.name); + if (open_1 !== close_1) { + this.tolerateError('Expected corresponding JSX closing tag for %0', open_1); + } + if (stack.length > 0) { + var child = this.finalize(el.node, new JSXNode.JSXElement(el.opening, el.children, el.closing)); + el = 
stack[stack.length - 1]; + el.children.push(child); + stack.pop(); + } + else { + break; + } + } + } + return el; + }; + JSXParser.prototype.parseJSXElement = function () { + var node = this.createJSXNode(); + var opening = this.parseJSXOpeningElement(); + var children = []; + var closing = null; + if (!opening.selfClosing) { + var el = this.parseComplexJSXElement({ node: node, opening: opening, closing: closing, children: children }); + children = el.children; + closing = el.closing; + } + return this.finalize(node, new JSXNode.JSXElement(opening, children, closing)); + }; + JSXParser.prototype.parseJSXRoot = function () { + // Pop the opening '<' added from the lookahead. + if (this.config.tokens) { + this.tokens.pop(); + } + this.startJSX(); + var element = this.parseJSXElement(); + this.finishJSX(); + return element; + }; + JSXParser.prototype.isStartOfExpression = function () { + return _super.prototype.isStartOfExpression.call(this) || this.match('<'); + }; + return JSXParser; + }(parser_1.Parser)); + exports.JSXParser = JSXParser; + + +/***/ }, +/* 4 */ +/***/ function(module, exports) { + + "use strict"; + Object.defineProperty(exports, "__esModule", { value: true }); + // See also tools/generate-unicode-regex.js. + var Regex = { + // Unicode v8.0.0 NonAsciiIdentifierStart: + NonAsciiIdentifierStart: /[\xAA\xB5\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0-\u08B4\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0AF9\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D\u0C58-\u0C5A\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D\u0D4E\u0D5F-\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F5\u13F8-\u13FD\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F8\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1877\u1880-
\u18A8\u18AA\u18B0-\u18F5\u1900-\u191E\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19B0-\u19C9\u1A00-\u1A16\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2118-\u211D\u2124\u2126\u2128\u212A-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2160-\u2188\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303C\u3041-\u3096\u309B-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FD5\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B\uA640-\uA66E\uA67F-\uA69D\uA6A0-\uA6EF\uA717-\uA71F\uA722-\uA788\uA78B-\uA7AD\uA7B0-\uA7B7\uA7F7-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB\uA8FD\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uA9E0-\uA9E4\uA9E6-\uA9EF\uA9FA-\uA9FE\uAA00-\uAA28\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA7E-\uAAAF\uAAB1\uAAB5\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB65\uAB70-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]|\uD800[\uDC00-\uDC0B\uDC0D-\uDC26\uDC28-\uDC3A\uDC3C\uDC3D\uDC3F-\uDC4D\uDC50-\uDC5D\uDC80-\uDCFA\uDD40-\uDD74\uDE80-\uDE9C\uDEA0-\uDED0\uDF00-\uDF1F\uDF30-\uDF4A\uDF50-\uDF75\uDF80-\uDF9D\uDFA0-\uDFC3\uDFC8-\uDFCF\uDFD1-\uDFD5]|\uD801[\uDC00-\uDC9D\uDD00-\uDD27\uDD30-\uDD63\uDE00-\uDF36\uDF40-\uDF55\uDF60-\uDF67]|\uD802[\uDC00-\uDC05\uDC08\uDC0A-\uDC35\uDC37\uDC38\uDC3C\uDC3F-\uDC55\uDC60-\uDC76\uDC80-\uDC9E\uDCE0-\uDCF2\uDCF4\uDCF5\uDD00-\uDD15\uDD20-\uDD39\uDD80-\uDDB7\uDDBE\uDDBF\uDE00\uDE10-\uDE13\uDE15-\uDE17\uDE19-\uDE33\uDE60-\uDE7C\uDE80-\uDE9C\uDEC0-\uDEC7\uDEC9-\uDEE4\uDF00-\uDF35\uDF40-\uDF55\uDF60-\uDF72\uDF80-\uDF91]|\uD803[\uDC00-\uDC48\uDC80-\uDCB2\uDCC0-\uDCF2]|\uD804[\uDC03-\uDC37\uDC83-\uDCAF\uDCD0-\uDCE8\uDD03-\uDD26\uDD50-\uDD72\uDD76\uDD83-\uDDB2\uDDC1-\uDDC4\uDDDA\uDDDC\uDE00-\uDE11\uDE13-\uDE2B\uDE80-\uDE86\uDE88\uDE8A-\uDE8D\uDE8F-\uDE9D\uDE9F-\uDEA8\uDEB0-\uDEDE\uDF05-\uDF0C\uDF0F\uDF10\uDF13-\uDF28\uDF2A-\uDF30\uDF32\uDF33\uDF35-\uDF39\uDF3D\uDF50\uDF5D-\uDF61]|\uD805[\uDC80-\uDCAF\uDCC4\uDCC5\uDCC7\uDD80-\uDDAE\uDDD8-\uDDDB\uDE00-\uDE2F\uDE44\uDE80-\uDEAA\uDF00-\uDF19]|\uD806[\uDCA0-\uDCDF\uDCFF\uDEC0-\uDEF8]|\uD808[\uDC00-\uDF99]|\uD809[\uDC00-\uDC6E\uDC80-\uDD43]|[\uD80C\uD840-\uD868\uD86A-\uD86C\uD86F-\uD872][\uDC00-\uDFFF]|\uD80D[\uDC00-\uDC2E]|\uD811[\uDC00-\uDE46]|\uD81A[\uDC00-\uDE38\uDE40-\uDE5E\uDED0-\uDEED\uDF00-\uDF2F\uDF40-\uDF43\uDF63-\uDF77\uDF7D-\uDF8F]|\uD81B[\uDF00-\uDF44\uDF50\uDF93-\uDF9F]|\uD82C[\uDC00\uDC01]|\uD82F[\uDC00-\uDC6A\uDC70-\uDC7C\uDC80-\uDC88\uDC90-\uDC99]|\uD8
35[\uDC00-\uDC54\uDC56-\uDC9C\uDC9E\uDC9F\uDCA2\uDCA5\uDCA6\uDCA9-\uDCAC\uDCAE-\uDCB9\uDCBB\uDCBD-\uDCC3\uDCC5-\uDD05\uDD07-\uDD0A\uDD0D-\uDD14\uDD16-\uDD1C\uDD1E-\uDD39\uDD3B-\uDD3E\uDD40-\uDD44\uDD46\uDD4A-\uDD50\uDD52-\uDEA5\uDEA8-\uDEC0\uDEC2-\uDEDA\uDEDC-\uDEFA\uDEFC-\uDF14\uDF16-\uDF34\uDF36-\uDF4E\uDF50-\uDF6E\uDF70-\uDF88\uDF8A-\uDFA8\uDFAA-\uDFC2\uDFC4-\uDFCB]|\uD83A[\uDC00-\uDCC4]|\uD83B[\uDE00-\uDE03\uDE05-\uDE1F\uDE21\uDE22\uDE24\uDE27\uDE29-\uDE32\uDE34-\uDE37\uDE39\uDE3B\uDE42\uDE47\uDE49\uDE4B\uDE4D-\uDE4F\uDE51\uDE52\uDE54\uDE57\uDE59\uDE5B\uDE5D\uDE5F\uDE61\uDE62\uDE64\uDE67-\uDE6A\uDE6C-\uDE72\uDE74-\uDE77\uDE79-\uDE7C\uDE7E\uDE80-\uDE89\uDE8B-\uDE9B\uDEA1-\uDEA3\uDEA5-\uDEA9\uDEAB-\uDEBB]|\uD869[\uDC00-\uDED6\uDF00-\uDFFF]|\uD86D[\uDC00-\uDF34\uDF40-\uDFFF]|\uD86E[\uDC00-\uDC1D\uDC20-\uDFFF]|\uD873[\uDC00-\uDEA1]|\uD87E[\uDC00-\uDE1D]/, + // Unicode v8.0.0 NonAsciiIdentifierPart: + NonAsciiIdentifierPart: /[\xAA\xB5\xB7\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0300-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u0483-\u0487\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05BD\u05BF\u05C1\u05C2\u05C4\u05C5\u05C7\u05D0-\u05EA\u05F0-\u05F2\u0610-\u061A\u0620-\u0669\u066E-\u06D3\u06D5-\u06DC\u06DF-\u06E8\u06EA-\u06FC\u06FF\u0710-\u074A\u074D-\u07B1\u07C0-\u07F5\u07FA\u0800-\u082D\u0840-\u085B\u08A0-\u08B4\u08E3-\u0963\u0966-\u096F\u0971-\u0983\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BC-\u09C4\u09C7\u09C8\u09CB-\u09CE\u09D7\u09DC\u09DD\u09DF-\u09E3\u09E6-\u09F1\u0A01-\u0A03\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A3C\u0A3E-\u0A42\u0A47\u0A48\u0A4B-\u0A4D\u0A51\u0A59-\u0A5C\u0A5E\u0A66-\u0A75\u0A81-\u0A83\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABC-\u0AC5\u0AC7-\u0AC9\u0ACB-\u0ACD\u0AD0\u0AE0-\u0AE3\u0AE6-\u0AEF\u0AF9\u0B01-\u0B03\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3C-\u0B44\u0B47\u0B48\u0B4B-\u0B4D\u0B56\u0B57\u0B5C\u0B5D\u0B5F-\u0B63\u0B66-\u0B6F\u0B71\u0B82\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BBE-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCD\u0BD0\u0BD7\u0BE6-\u0BEF\u0C00-\u0C03\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55\u0C56\u0C58-\u0C5A\u0C60-\u0C63\u0C66-\u0C6F\u0C81-\u0C83\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBC-\u0CC4\u0CC6-\u0CC8\u0CCA-\u0CCD\u0CD5\u0CD6\u0CDE\u0CE0-\u0CE3\u0CE6-\u0CEF\u0CF1\u0CF2\u0D01-\u0D03\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D-\u0D44\u0D46-\u0D48\u0D4A-\u0D4E\u0D57\u0D5F-\u0D63\u0D66-\u0D6F\u0D7A-\u0D7F\u0D82\u0D83\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0DCA\u0DCF-\u0DD4\u0DD6\u0DD8-\u0DDF\u0DE6-\u0DEF\u0DF2\u0DF3\u0E01-\u0E3A\u0E40-\u0E4E\u0E50-\u0E59\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB9\u0EBB-\u0EBD\u0EC0-\u0EC4\u0EC6\u0EC8-\u0ECD\u0ED0-\u0ED9\u0EDC-\u0EDF\u0F00\u0F18\u0F19\u0F20-\u0F29\u0F35\u0F37\u0F39\u0F3E-\u0F47\u0F49-\u0F6C\u0F71-\u0F84\u0F86-\u0F97\u0F99-\u0FBC\u0FC6\u1000-\u1049\u1050-\u109D\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u135D-\u135F\u1369-
\u1371\u1380-\u138F\u13A0-\u13F5\u13F8-\u13FD\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F8\u1700-\u170C\u170E-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176C\u176E-\u1770\u1772\u1773\u1780-\u17D3\u17D7\u17DC\u17DD\u17E0-\u17E9\u180B-\u180D\u1810-\u1819\u1820-\u1877\u1880-\u18AA\u18B0-\u18F5\u1900-\u191E\u1920-\u192B\u1930-\u193B\u1946-\u196D\u1970-\u1974\u1980-\u19AB\u19B0-\u19C9\u19D0-\u19DA\u1A00-\u1A1B\u1A20-\u1A5E\u1A60-\u1A7C\u1A7F-\u1A89\u1A90-\u1A99\u1AA7\u1AB0-\u1ABD\u1B00-\u1B4B\u1B50-\u1B59\u1B6B-\u1B73\u1B80-\u1BF3\u1C00-\u1C37\u1C40-\u1C49\u1C4D-\u1C7D\u1CD0-\u1CD2\u1CD4-\u1CF6\u1CF8\u1CF9\u1D00-\u1DF5\u1DFC-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u200C\u200D\u203F\u2040\u2054\u2071\u207F\u2090-\u209C\u20D0-\u20DC\u20E1\u20E5-\u20F0\u2102\u2107\u210A-\u2113\u2115\u2118-\u211D\u2124\u2126\u2128\u212A-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2160-\u2188\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D7F-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2DE0-\u2DFF\u3005-\u3007\u3021-\u302F\u3031-\u3035\u3038-\u303C\u3041-\u3096\u3099-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FD5\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA62B\uA640-\uA66F\uA674-\uA67D\uA67F-\uA6F1\uA717-\uA71F\uA722-\uA788\uA78B-\uA7AD\uA7B0-\uA7B7\uA7F7-\uA827\uA840-\uA873\uA880-\uA8C4\uA8D0-\uA8D9\uA8E0-\uA8F7\uA8FB\uA8FD\uA900-\uA92D\uA930-\uA953\uA960-\uA97C\uA980-\uA9C0\uA9CF-\uA9D9\uA9E0-\uA9FE\uAA00-\uAA36\uAA40-\uAA4D\uAA50-\uAA59\uAA60-\uAA76\uAA7A-\uAAC2\uAADB-\uAADD\uAAE0-\uAAEF\uAAF2-\uAAF6\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB65\uAB70-\uABEA\uABEC\uABED\uABF0-\uABF9\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE00-\uFE0F\uFE20-\uFE2F\uFE33\uFE34\uFE4D-\uFE4F\uFE70-\uFE74\uFE76-\uFEFC\uFF10-\uFF19\uFF21-\uFF3A\uFF3F\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]|\uD800[\uDC00-\uDC0B\uDC0D-\uDC26\uDC28-\uDC3A\uDC3C\uDC3D\uDC3F-\uDC4D\uDC50-\uDC5D\uDC80-\uDCFA\uDD40-\uDD74\uDDFD\uDE80-\uDE9C\uDEA0-\uDED0\uDEE0\uDF00-\uDF1F\uDF30-\uDF4A\uDF50-\uDF7A\uDF80-\uDF9D\uDFA0-\uDFC3\uDFC8-\uDFCF\uDFD1-\uDFD5]|\uD801[\uDC00-\uDC9D\uDCA0-\uDCA9\uDD00-\uDD27\uDD30-\uDD63\uDE00-\uDF36\uDF40-\uDF55\uDF60-\uDF67]|\uD802[\uDC00-\uDC05\uDC08\uDC0A-\uDC35\uDC37\uDC38\uDC3C\uDC3F-\uDC55\uDC60-\uDC76\uDC80-\uDC9E\uDCE0-\uDCF2\uDCF4\uDCF5\uDD00-\uDD15\uDD20-\uDD39\uDD80-\uDDB7\uDDBE\uDDBF\uDE00-\uDE03\uDE05\uDE06\uDE0C-\uDE13\uDE15-\uDE17\uDE19-\uDE33\uDE38-\uDE3A\uDE3F\uDE60-\uDE7C\uDE80-\uDE9C\uDEC0-\uDEC7\uDEC9-\uDEE6\uDF00-\uDF35\uDF40-\uDF55\uDF60-\uDF72\uDF80-\uDF91]|\uD803[\uDC00-\uDC48\uDC80-\uDCB2\uDCC0-\uDCF2]|\uD804[\uDC00-\uDC46\uDC66-\uDC6F\uDC7F-\uDCBA\uDCD0-\uDCE8\uDCF0-\uDCF9\uDD00-\uDD34\uDD36-\uDD3F\uDD50-\uDD73\uDD76\uDD80-\uDDC4\uDDCA-\uDDCC\uDDD0-\uDDDA\uDDDC\uDE00-\uDE11\uDE13-\uDE37\uDE80-\uDE86\uDE88\uDE8A-\uDE8D\uDE8F-\uDE9D\uDE9F-\uDEA8\uDEB0-\uDEEA\uDEF0-\uDEF9\uDF00-\uDF03\uDF05-\uDF0C\uDF0F\uDF10\uDF13-\uDF28\uDF2A-\uDF30\uDF32\uDF33\uDF35-\uDF39\uDF3C-\uDF44\uDF47\uDF48\
uDF4B-\uDF4D\uDF50\uDF57\uDF5D-\uDF63\uDF66-\uDF6C\uDF70-\uDF74]|\uD805[\uDC80-\uDCC5\uDCC7\uDCD0-\uDCD9\uDD80-\uDDB5\uDDB8-\uDDC0\uDDD8-\uDDDD\uDE00-\uDE40\uDE44\uDE50-\uDE59\uDE80-\uDEB7\uDEC0-\uDEC9\uDF00-\uDF19\uDF1D-\uDF2B\uDF30-\uDF39]|\uD806[\uDCA0-\uDCE9\uDCFF\uDEC0-\uDEF8]|\uD808[\uDC00-\uDF99]|\uD809[\uDC00-\uDC6E\uDC80-\uDD43]|[\uD80C\uD840-\uD868\uD86A-\uD86C\uD86F-\uD872][\uDC00-\uDFFF]|\uD80D[\uDC00-\uDC2E]|\uD811[\uDC00-\uDE46]|\uD81A[\uDC00-\uDE38\uDE40-\uDE5E\uDE60-\uDE69\uDED0-\uDEED\uDEF0-\uDEF4\uDF00-\uDF36\uDF40-\uDF43\uDF50-\uDF59\uDF63-\uDF77\uDF7D-\uDF8F]|\uD81B[\uDF00-\uDF44\uDF50-\uDF7E\uDF8F-\uDF9F]|\uD82C[\uDC00\uDC01]|\uD82F[\uDC00-\uDC6A\uDC70-\uDC7C\uDC80-\uDC88\uDC90-\uDC99\uDC9D\uDC9E]|\uD834[\uDD65-\uDD69\uDD6D-\uDD72\uDD7B-\uDD82\uDD85-\uDD8B\uDDAA-\uDDAD\uDE42-\uDE44]|\uD835[\uDC00-\uDC54\uDC56-\uDC9C\uDC9E\uDC9F\uDCA2\uDCA5\uDCA6\uDCA9-\uDCAC\uDCAE-\uDCB9\uDCBB\uDCBD-\uDCC3\uDCC5-\uDD05\uDD07-\uDD0A\uDD0D-\uDD14\uDD16-\uDD1C\uDD1E-\uDD39\uDD3B-\uDD3E\uDD40-\uDD44\uDD46\uDD4A-\uDD50\uDD52-\uDEA5\uDEA8-\uDEC0\uDEC2-\uDEDA\uDEDC-\uDEFA\uDEFC-\uDF14\uDF16-\uDF34\uDF36-\uDF4E\uDF50-\uDF6E\uDF70-\uDF88\uDF8A-\uDFA8\uDFAA-\uDFC2\uDFC4-\uDFCB\uDFCE-\uDFFF]|\uD836[\uDE00-\uDE36\uDE3B-\uDE6C\uDE75\uDE84\uDE9B-\uDE9F\uDEA1-\uDEAF]|\uD83A[\uDC00-\uDCC4\uDCD0-\uDCD6]|\uD83B[\uDE00-\uDE03\uDE05-\uDE1F\uDE21\uDE22\uDE24\uDE27\uDE29-\uDE32\uDE34-\uDE37\uDE39\uDE3B\uDE42\uDE47\uDE49\uDE4B\uDE4D-\uDE4F\uDE51\uDE52\uDE54\uDE57\uDE59\uDE5B\uDE5D\uDE5F\uDE61\uDE62\uDE64\uDE67-\uDE6A\uDE6C-\uDE72\uDE74-\uDE77\uDE79-\uDE7C\uDE7E\uDE80-\uDE89\uDE8B-\uDE9B\uDEA1-\uDEA3\uDEA5-\uDEA9\uDEAB-\uDEBB]|\uD869[\uDC00-\uDED6\uDF00-\uDFFF]|\uD86D[\uDC00-\uDF34\uDF40-\uDFFF]|\uD86E[\uDC00-\uDC1D\uDC20-\uDFFF]|\uD873[\uDC00-\uDEA1]|\uD87E[\uDC00-\uDE1D]|\uDB40[\uDD00-\uDDEF]/ + }; + exports.Character = { + /* tslint:disable:no-bitwise */ + fromCodePoint: function (cp) { + return (cp < 0x10000) ? 
String.fromCharCode(cp) : + String.fromCharCode(0xD800 + ((cp - 0x10000) >> 10)) + + String.fromCharCode(0xDC00 + ((cp - 0x10000) & 1023)); + }, + // https://tc39.github.io/ecma262/#sec-white-space + isWhiteSpace: function (cp) { + return (cp === 0x20) || (cp === 0x09) || (cp === 0x0B) || (cp === 0x0C) || (cp === 0xA0) || + (cp >= 0x1680 && [0x1680, 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x202F, 0x205F, 0x3000, 0xFEFF].indexOf(cp) >= 0); + }, + // https://tc39.github.io/ecma262/#sec-line-terminators + isLineTerminator: function (cp) { + return (cp === 0x0A) || (cp === 0x0D) || (cp === 0x2028) || (cp === 0x2029); + }, + // https://tc39.github.io/ecma262/#sec-names-and-keywords + isIdentifierStart: function (cp) { + return (cp === 0x24) || (cp === 0x5F) || + (cp >= 0x41 && cp <= 0x5A) || + (cp >= 0x61 && cp <= 0x7A) || + (cp === 0x5C) || + ((cp >= 0x80) && Regex.NonAsciiIdentifierStart.test(exports.Character.fromCodePoint(cp))); + }, + isIdentifierPart: function (cp) { + return (cp === 0x24) || (cp === 0x5F) || + (cp >= 0x41 && cp <= 0x5A) || + (cp >= 0x61 && cp <= 0x7A) || + (cp >= 0x30 && cp <= 0x39) || + (cp === 0x5C) || + ((cp >= 0x80) && Regex.NonAsciiIdentifierPart.test(exports.Character.fromCodePoint(cp))); + }, + // https://tc39.github.io/ecma262/#sec-literals-numeric-literals + isDecimalDigit: function (cp) { + return (cp >= 0x30 && cp <= 0x39); // 0..9 + }, + isHexDigit: function (cp) { + return (cp >= 0x30 && cp <= 0x39) || + (cp >= 0x41 && cp <= 0x46) || + (cp >= 0x61 && cp <= 0x66); // a..f + }, + isOctalDigit: function (cp) { + return (cp >= 0x30 && cp <= 0x37); // 0..7 + } + }; + + +/***/ }, +/* 5 */ +/***/ function(module, exports, __nested_webpack_require_54354__) { + + "use strict"; + Object.defineProperty(exports, "__esModule", { value: true }); + var jsx_syntax_1 = __nested_webpack_require_54354__(6); + /* tslint:disable:max-classes-per-file */ + var JSXClosingElement = (function () { + function JSXClosingElement(name) { + this.type = jsx_syntax_1.JSXSyntax.JSXClosingElement; + this.name = name; + } + return JSXClosingElement; + }()); + exports.JSXClosingElement = JSXClosingElement; + var JSXElement = (function () { + function JSXElement(openingElement, children, closingElement) { + this.type = jsx_syntax_1.JSXSyntax.JSXElement; + this.openingElement = openingElement; + this.children = children; + this.closingElement = closingElement; + } + return JSXElement; + }()); + exports.JSXElement = JSXElement; + var JSXEmptyExpression = (function () { + function JSXEmptyExpression() { + this.type = jsx_syntax_1.JSXSyntax.JSXEmptyExpression; + } + return JSXEmptyExpression; + }()); + exports.JSXEmptyExpression = JSXEmptyExpression; + var JSXExpressionContainer = (function () { + function JSXExpressionContainer(expression) { + this.type = jsx_syntax_1.JSXSyntax.JSXExpressionContainer; + this.expression = expression; + } + return JSXExpressionContainer; + }()); + exports.JSXExpressionContainer = JSXExpressionContainer; + var JSXIdentifier = (function () { + function JSXIdentifier(name) { + this.type = jsx_syntax_1.JSXSyntax.JSXIdentifier; + this.name = name; + } + return JSXIdentifier; + }()); + exports.JSXIdentifier = JSXIdentifier; + var JSXMemberExpression = (function () { + function JSXMemberExpression(object, property) { + this.type = jsx_syntax_1.JSXSyntax.JSXMemberExpression; + this.object = object; + this.property = property; + } + return JSXMemberExpression; + }()); + exports.JSXMemberExpression = JSXMemberExpression; + var 
JSXAttribute = (function () { + function JSXAttribute(name, value) { + this.type = jsx_syntax_1.JSXSyntax.JSXAttribute; + this.name = name; + this.value = value; + } + return JSXAttribute; + }()); + exports.JSXAttribute = JSXAttribute; + var JSXNamespacedName = (function () { + function JSXNamespacedName(namespace, name) { + this.type = jsx_syntax_1.JSXSyntax.JSXNamespacedName; + this.namespace = namespace; + this.name = name; + } + return JSXNamespacedName; + }()); + exports.JSXNamespacedName = JSXNamespacedName; + var JSXOpeningElement = (function () { + function JSXOpeningElement(name, selfClosing, attributes) { + this.type = jsx_syntax_1.JSXSyntax.JSXOpeningElement; + this.name = name; + this.selfClosing = selfClosing; + this.attributes = attributes; + } + return JSXOpeningElement; + }()); + exports.JSXOpeningElement = JSXOpeningElement; + var JSXSpreadAttribute = (function () { + function JSXSpreadAttribute(argument) { + this.type = jsx_syntax_1.JSXSyntax.JSXSpreadAttribute; + this.argument = argument; + } + return JSXSpreadAttribute; + }()); + exports.JSXSpreadAttribute = JSXSpreadAttribute; + var JSXText = (function () { + function JSXText(value, raw) { + this.type = jsx_syntax_1.JSXSyntax.JSXText; + this.value = value; + this.raw = raw; + } + return JSXText; + }()); + exports.JSXText = JSXText; + + +/***/ }, +/* 6 */ +/***/ function(module, exports) { + + "use strict"; + Object.defineProperty(exports, "__esModule", { value: true }); + exports.JSXSyntax = { + JSXAttribute: 'JSXAttribute', + JSXClosingElement: 'JSXClosingElement', + JSXElement: 'JSXElement', + JSXEmptyExpression: 'JSXEmptyExpression', + JSXExpressionContainer: 'JSXExpressionContainer', + JSXIdentifier: 'JSXIdentifier', + JSXMemberExpression: 'JSXMemberExpression', + JSXNamespacedName: 'JSXNamespacedName', + JSXOpeningElement: 'JSXOpeningElement', + JSXSpreadAttribute: 'JSXSpreadAttribute', + JSXText: 'JSXText' + }; + + +/***/ }, +/* 7 */ +/***/ function(module, exports, __nested_webpack_require_58416__) { + + "use strict"; + Object.defineProperty(exports, "__esModule", { value: true }); + var syntax_1 = __nested_webpack_require_58416__(2); + /* tslint:disable:max-classes-per-file */ + var ArrayExpression = (function () { + function ArrayExpression(elements) { + this.type = syntax_1.Syntax.ArrayExpression; + this.elements = elements; + } + return ArrayExpression; + }()); + exports.ArrayExpression = ArrayExpression; + var ArrayPattern = (function () { + function ArrayPattern(elements) { + this.type = syntax_1.Syntax.ArrayPattern; + this.elements = elements; + } + return ArrayPattern; + }()); + exports.ArrayPattern = ArrayPattern; + var ArrowFunctionExpression = (function () { + function ArrowFunctionExpression(params, body, expression) { + this.type = syntax_1.Syntax.ArrowFunctionExpression; + this.id = null; + this.params = params; + this.body = body; + this.generator = false; + this.expression = expression; + this.async = false; + } + return ArrowFunctionExpression; + }()); + exports.ArrowFunctionExpression = ArrowFunctionExpression; + var AssignmentExpression = (function () { + function AssignmentExpression(operator, left, right) { + this.type = syntax_1.Syntax.AssignmentExpression; + this.operator = operator; + this.left = left; + this.right = right; + } + return AssignmentExpression; + }()); + exports.AssignmentExpression = AssignmentExpression; + var AssignmentPattern = (function () { + function AssignmentPattern(left, right) { + this.type = syntax_1.Syntax.AssignmentPattern; + this.left = left; + this.right = 
right; + } + return AssignmentPattern; + }()); + exports.AssignmentPattern = AssignmentPattern; + var AsyncArrowFunctionExpression = (function () { + function AsyncArrowFunctionExpression(params, body, expression) { + this.type = syntax_1.Syntax.ArrowFunctionExpression; + this.id = null; + this.params = params; + this.body = body; + this.generator = false; + this.expression = expression; + this.async = true; + } + return AsyncArrowFunctionExpression; + }()); + exports.AsyncArrowFunctionExpression = AsyncArrowFunctionExpression; + var AsyncFunctionDeclaration = (function () { + function AsyncFunctionDeclaration(id, params, body) { + this.type = syntax_1.Syntax.FunctionDeclaration; + this.id = id; + this.params = params; + this.body = body; + this.generator = false; + this.expression = false; + this.async = true; + } + return AsyncFunctionDeclaration; + }()); + exports.AsyncFunctionDeclaration = AsyncFunctionDeclaration; + var AsyncFunctionExpression = (function () { + function AsyncFunctionExpression(id, params, body) { + this.type = syntax_1.Syntax.FunctionExpression; + this.id = id; + this.params = params; + this.body = body; + this.generator = false; + this.expression = false; + this.async = true; + } + return AsyncFunctionExpression; + }()); + exports.AsyncFunctionExpression = AsyncFunctionExpression; + var AwaitExpression = (function () { + function AwaitExpression(argument) { + this.type = syntax_1.Syntax.AwaitExpression; + this.argument = argument; + } + return AwaitExpression; + }()); + exports.AwaitExpression = AwaitExpression; + var BinaryExpression = (function () { + function BinaryExpression(operator, left, right) { + var logical = (operator === '||' || operator === '&&'); + this.type = logical ? syntax_1.Syntax.LogicalExpression : syntax_1.Syntax.BinaryExpression; + this.operator = operator; + this.left = left; + this.right = right; + } + return BinaryExpression; + }()); + exports.BinaryExpression = BinaryExpression; + var BlockStatement = (function () { + function BlockStatement(body) { + this.type = syntax_1.Syntax.BlockStatement; + this.body = body; + } + return BlockStatement; + }()); + exports.BlockStatement = BlockStatement; + var BreakStatement = (function () { + function BreakStatement(label) { + this.type = syntax_1.Syntax.BreakStatement; + this.label = label; + } + return BreakStatement; + }()); + exports.BreakStatement = BreakStatement; + var CallExpression = (function () { + function CallExpression(callee, args) { + this.type = syntax_1.Syntax.CallExpression; + this.callee = callee; + this.arguments = args; + } + return CallExpression; + }()); + exports.CallExpression = CallExpression; + var CatchClause = (function () { + function CatchClause(param, body) { + this.type = syntax_1.Syntax.CatchClause; + this.param = param; + this.body = body; + } + return CatchClause; + }()); + exports.CatchClause = CatchClause; + var ClassBody = (function () { + function ClassBody(body) { + this.type = syntax_1.Syntax.ClassBody; + this.body = body; + } + return ClassBody; + }()); + exports.ClassBody = ClassBody; + var ClassDeclaration = (function () { + function ClassDeclaration(id, superClass, body) { + this.type = syntax_1.Syntax.ClassDeclaration; + this.id = id; + this.superClass = superClass; + this.body = body; + } + return ClassDeclaration; + }()); + exports.ClassDeclaration = ClassDeclaration; + var ClassExpression = (function () { + function ClassExpression(id, superClass, body) { + this.type = syntax_1.Syntax.ClassExpression; + this.id = id; + this.superClass = superClass; + 
this.body = body; + } + return ClassExpression; + }()); + exports.ClassExpression = ClassExpression; + var ComputedMemberExpression = (function () { + function ComputedMemberExpression(object, property) { + this.type = syntax_1.Syntax.MemberExpression; + this.computed = true; + this.object = object; + this.property = property; + } + return ComputedMemberExpression; + }()); + exports.ComputedMemberExpression = ComputedMemberExpression; + var ConditionalExpression = (function () { + function ConditionalExpression(test, consequent, alternate) { + this.type = syntax_1.Syntax.ConditionalExpression; + this.test = test; + this.consequent = consequent; + this.alternate = alternate; + } + return ConditionalExpression; + }()); + exports.ConditionalExpression = ConditionalExpression; + var ContinueStatement = (function () { + function ContinueStatement(label) { + this.type = syntax_1.Syntax.ContinueStatement; + this.label = label; + } + return ContinueStatement; + }()); + exports.ContinueStatement = ContinueStatement; + var DebuggerStatement = (function () { + function DebuggerStatement() { + this.type = syntax_1.Syntax.DebuggerStatement; + } + return DebuggerStatement; + }()); + exports.DebuggerStatement = DebuggerStatement; + var Directive = (function () { + function Directive(expression, directive) { + this.type = syntax_1.Syntax.ExpressionStatement; + this.expression = expression; + this.directive = directive; + } + return Directive; + }()); + exports.Directive = Directive; + var DoWhileStatement = (function () { + function DoWhileStatement(body, test) { + this.type = syntax_1.Syntax.DoWhileStatement; + this.body = body; + this.test = test; + } + return DoWhileStatement; + }()); + exports.DoWhileStatement = DoWhileStatement; + var EmptyStatement = (function () { + function EmptyStatement() { + this.type = syntax_1.Syntax.EmptyStatement; + } + return EmptyStatement; + }()); + exports.EmptyStatement = EmptyStatement; + var ExportAllDeclaration = (function () { + function ExportAllDeclaration(source) { + this.type = syntax_1.Syntax.ExportAllDeclaration; + this.source = source; + } + return ExportAllDeclaration; + }()); + exports.ExportAllDeclaration = ExportAllDeclaration; + var ExportDefaultDeclaration = (function () { + function ExportDefaultDeclaration(declaration) { + this.type = syntax_1.Syntax.ExportDefaultDeclaration; + this.declaration = declaration; + } + return ExportDefaultDeclaration; + }()); + exports.ExportDefaultDeclaration = ExportDefaultDeclaration; + var ExportNamedDeclaration = (function () { + function ExportNamedDeclaration(declaration, specifiers, source) { + this.type = syntax_1.Syntax.ExportNamedDeclaration; + this.declaration = declaration; + this.specifiers = specifiers; + this.source = source; + } + return ExportNamedDeclaration; + }()); + exports.ExportNamedDeclaration = ExportNamedDeclaration; + var ExportSpecifier = (function () { + function ExportSpecifier(local, exported) { + this.type = syntax_1.Syntax.ExportSpecifier; + this.exported = exported; + this.local = local; + } + return ExportSpecifier; + }()); + exports.ExportSpecifier = ExportSpecifier; + var ExpressionStatement = (function () { + function ExpressionStatement(expression) { + this.type = syntax_1.Syntax.ExpressionStatement; + this.expression = expression; + } + return ExpressionStatement; + }()); + exports.ExpressionStatement = ExpressionStatement; + var ForInStatement = (function () { + function ForInStatement(left, right, body) { + this.type = syntax_1.Syntax.ForInStatement; + this.left = left; + 
this.right = right; + this.body = body; + this.each = false; + } + return ForInStatement; + }()); + exports.ForInStatement = ForInStatement; + var ForOfStatement = (function () { + function ForOfStatement(left, right, body) { + this.type = syntax_1.Syntax.ForOfStatement; + this.left = left; + this.right = right; + this.body = body; + } + return ForOfStatement; + }()); + exports.ForOfStatement = ForOfStatement; + var ForStatement = (function () { + function ForStatement(init, test, update, body) { + this.type = syntax_1.Syntax.ForStatement; + this.init = init; + this.test = test; + this.update = update; + this.body = body; + } + return ForStatement; + }()); + exports.ForStatement = ForStatement; + var FunctionDeclaration = (function () { + function FunctionDeclaration(id, params, body, generator) { + this.type = syntax_1.Syntax.FunctionDeclaration; + this.id = id; + this.params = params; + this.body = body; + this.generator = generator; + this.expression = false; + this.async = false; + } + return FunctionDeclaration; + }()); + exports.FunctionDeclaration = FunctionDeclaration; + var FunctionExpression = (function () { + function FunctionExpression(id, params, body, generator) { + this.type = syntax_1.Syntax.FunctionExpression; + this.id = id; + this.params = params; + this.body = body; + this.generator = generator; + this.expression = false; + this.async = false; + } + return FunctionExpression; + }()); + exports.FunctionExpression = FunctionExpression; + var Identifier = (function () { + function Identifier(name) { + this.type = syntax_1.Syntax.Identifier; + this.name = name; + } + return Identifier; + }()); + exports.Identifier = Identifier; + var IfStatement = (function () { + function IfStatement(test, consequent, alternate) { + this.type = syntax_1.Syntax.IfStatement; + this.test = test; + this.consequent = consequent; + this.alternate = alternate; + } + return IfStatement; + }()); + exports.IfStatement = IfStatement; + var ImportDeclaration = (function () { + function ImportDeclaration(specifiers, source) { + this.type = syntax_1.Syntax.ImportDeclaration; + this.specifiers = specifiers; + this.source = source; + } + return ImportDeclaration; + }()); + exports.ImportDeclaration = ImportDeclaration; + var ImportDefaultSpecifier = (function () { + function ImportDefaultSpecifier(local) { + this.type = syntax_1.Syntax.ImportDefaultSpecifier; + this.local = local; + } + return ImportDefaultSpecifier; + }()); + exports.ImportDefaultSpecifier = ImportDefaultSpecifier; + var ImportNamespaceSpecifier = (function () { + function ImportNamespaceSpecifier(local) { + this.type = syntax_1.Syntax.ImportNamespaceSpecifier; + this.local = local; + } + return ImportNamespaceSpecifier; + }()); + exports.ImportNamespaceSpecifier = ImportNamespaceSpecifier; + var ImportSpecifier = (function () { + function ImportSpecifier(local, imported) { + this.type = syntax_1.Syntax.ImportSpecifier; + this.local = local; + this.imported = imported; + } + return ImportSpecifier; + }()); + exports.ImportSpecifier = ImportSpecifier; + var LabeledStatement = (function () { + function LabeledStatement(label, body) { + this.type = syntax_1.Syntax.LabeledStatement; + this.label = label; + this.body = body; + } + return LabeledStatement; + }()); + exports.LabeledStatement = LabeledStatement; + var Literal = (function () { + function Literal(value, raw) { + this.type = syntax_1.Syntax.Literal; + this.value = value; + this.raw = raw; + } + return Literal; + }()); + exports.Literal = Literal; + var MetaProperty = (function () { 
+ function MetaProperty(meta, property) { + this.type = syntax_1.Syntax.MetaProperty; + this.meta = meta; + this.property = property; + } + return MetaProperty; + }()); + exports.MetaProperty = MetaProperty; + var MethodDefinition = (function () { + function MethodDefinition(key, computed, value, kind, isStatic) { + this.type = syntax_1.Syntax.MethodDefinition; + this.key = key; + this.computed = computed; + this.value = value; + this.kind = kind; + this.static = isStatic; + } + return MethodDefinition; + }()); + exports.MethodDefinition = MethodDefinition; + var Module = (function () { + function Module(body) { + this.type = syntax_1.Syntax.Program; + this.body = body; + this.sourceType = 'module'; + } + return Module; + }()); + exports.Module = Module; + var NewExpression = (function () { + function NewExpression(callee, args) { + this.type = syntax_1.Syntax.NewExpression; + this.callee = callee; + this.arguments = args; + } + return NewExpression; + }()); + exports.NewExpression = NewExpression; + var ObjectExpression = (function () { + function ObjectExpression(properties) { + this.type = syntax_1.Syntax.ObjectExpression; + this.properties = properties; + } + return ObjectExpression; + }()); + exports.ObjectExpression = ObjectExpression; + var ObjectPattern = (function () { + function ObjectPattern(properties) { + this.type = syntax_1.Syntax.ObjectPattern; + this.properties = properties; + } + return ObjectPattern; + }()); + exports.ObjectPattern = ObjectPattern; + var Property = (function () { + function Property(kind, key, computed, value, method, shorthand) { + this.type = syntax_1.Syntax.Property; + this.key = key; + this.computed = computed; + this.value = value; + this.kind = kind; + this.method = method; + this.shorthand = shorthand; + } + return Property; + }()); + exports.Property = Property; + var RegexLiteral = (function () { + function RegexLiteral(value, raw, pattern, flags) { + this.type = syntax_1.Syntax.Literal; + this.value = value; + this.raw = raw; + this.regex = { pattern: pattern, flags: flags }; + } + return RegexLiteral; + }()); + exports.RegexLiteral = RegexLiteral; + var RestElement = (function () { + function RestElement(argument) { + this.type = syntax_1.Syntax.RestElement; + this.argument = argument; + } + return RestElement; + }()); + exports.RestElement = RestElement; + var ReturnStatement = (function () { + function ReturnStatement(argument) { + this.type = syntax_1.Syntax.ReturnStatement; + this.argument = argument; + } + return ReturnStatement; + }()); + exports.ReturnStatement = ReturnStatement; + var Script = (function () { + function Script(body) { + this.type = syntax_1.Syntax.Program; + this.body = body; + this.sourceType = 'script'; + } + return Script; + }()); + exports.Script = Script; + var SequenceExpression = (function () { + function SequenceExpression(expressions) { + this.type = syntax_1.Syntax.SequenceExpression; + this.expressions = expressions; + } + return SequenceExpression; + }()); + exports.SequenceExpression = SequenceExpression; + var SpreadElement = (function () { + function SpreadElement(argument) { + this.type = syntax_1.Syntax.SpreadElement; + this.argument = argument; + } + return SpreadElement; + }()); + exports.SpreadElement = SpreadElement; + var StaticMemberExpression = (function () { + function StaticMemberExpression(object, property) { + this.type = syntax_1.Syntax.MemberExpression; + this.computed = false; + this.object = object; + this.property = property; + } + return StaticMemberExpression; + }()); + 
exports.StaticMemberExpression = StaticMemberExpression; + var Super = (function () { + function Super() { + this.type = syntax_1.Syntax.Super; + } + return Super; + }()); + exports.Super = Super; + var SwitchCase = (function () { + function SwitchCase(test, consequent) { + this.type = syntax_1.Syntax.SwitchCase; + this.test = test; + this.consequent = consequent; + } + return SwitchCase; + }()); + exports.SwitchCase = SwitchCase; + var SwitchStatement = (function () { + function SwitchStatement(discriminant, cases) { + this.type = syntax_1.Syntax.SwitchStatement; + this.discriminant = discriminant; + this.cases = cases; + } + return SwitchStatement; + }()); + exports.SwitchStatement = SwitchStatement; + var TaggedTemplateExpression = (function () { + function TaggedTemplateExpression(tag, quasi) { + this.type = syntax_1.Syntax.TaggedTemplateExpression; + this.tag = tag; + this.quasi = quasi; + } + return TaggedTemplateExpression; + }()); + exports.TaggedTemplateExpression = TaggedTemplateExpression; + var TemplateElement = (function () { + function TemplateElement(value, tail) { + this.type = syntax_1.Syntax.TemplateElement; + this.value = value; + this.tail = tail; + } + return TemplateElement; + }()); + exports.TemplateElement = TemplateElement; + var TemplateLiteral = (function () { + function TemplateLiteral(quasis, expressions) { + this.type = syntax_1.Syntax.TemplateLiteral; + this.quasis = quasis; + this.expressions = expressions; + } + return TemplateLiteral; + }()); + exports.TemplateLiteral = TemplateLiteral; + var ThisExpression = (function () { + function ThisExpression() { + this.type = syntax_1.Syntax.ThisExpression; + } + return ThisExpression; + }()); + exports.ThisExpression = ThisExpression; + var ThrowStatement = (function () { + function ThrowStatement(argument) { + this.type = syntax_1.Syntax.ThrowStatement; + this.argument = argument; + } + return ThrowStatement; + }()); + exports.ThrowStatement = ThrowStatement; + var TryStatement = (function () { + function TryStatement(block, handler, finalizer) { + this.type = syntax_1.Syntax.TryStatement; + this.block = block; + this.handler = handler; + this.finalizer = finalizer; + } + return TryStatement; + }()); + exports.TryStatement = TryStatement; + var UnaryExpression = (function () { + function UnaryExpression(operator, argument) { + this.type = syntax_1.Syntax.UnaryExpression; + this.operator = operator; + this.argument = argument; + this.prefix = true; + } + return UnaryExpression; + }()); + exports.UnaryExpression = UnaryExpression; + var UpdateExpression = (function () { + function UpdateExpression(operator, argument, prefix) { + this.type = syntax_1.Syntax.UpdateExpression; + this.operator = operator; + this.argument = argument; + this.prefix = prefix; + } + return UpdateExpression; + }()); + exports.UpdateExpression = UpdateExpression; + var VariableDeclaration = (function () { + function VariableDeclaration(declarations, kind) { + this.type = syntax_1.Syntax.VariableDeclaration; + this.declarations = declarations; + this.kind = kind; + } + return VariableDeclaration; + }()); + exports.VariableDeclaration = VariableDeclaration; + var VariableDeclarator = (function () { + function VariableDeclarator(id, init) { + this.type = syntax_1.Syntax.VariableDeclarator; + this.id = id; + this.init = init; + } + return VariableDeclarator; + }()); + exports.VariableDeclarator = VariableDeclarator; + var WhileStatement = (function () { + function WhileStatement(test, body) { + this.type = syntax_1.Syntax.WhileStatement; + 
this.test = test; + this.body = body; + } + return WhileStatement; + }()); + exports.WhileStatement = WhileStatement; + var WithStatement = (function () { + function WithStatement(object, body) { + this.type = syntax_1.Syntax.WithStatement; + this.object = object; + this.body = body; + } + return WithStatement; + }()); + exports.WithStatement = WithStatement; + var YieldExpression = (function () { + function YieldExpression(argument, delegate) { + this.type = syntax_1.Syntax.YieldExpression; + this.argument = argument; + this.delegate = delegate; + } + return YieldExpression; + }()); + exports.YieldExpression = YieldExpression; + + +/***/ }, +/* 8 */ +/***/ function(module, exports, __nested_webpack_require_80491__) { + + "use strict"; + Object.defineProperty(exports, "__esModule", { value: true }); + var assert_1 = __nested_webpack_require_80491__(9); + var error_handler_1 = __nested_webpack_require_80491__(10); + var messages_1 = __nested_webpack_require_80491__(11); + var Node = __nested_webpack_require_80491__(7); + var scanner_1 = __nested_webpack_require_80491__(12); + var syntax_1 = __nested_webpack_require_80491__(2); + var token_1 = __nested_webpack_require_80491__(13); + var ArrowParameterPlaceHolder = 'ArrowParameterPlaceHolder'; + var Parser = (function () { + function Parser(code, options, delegate) { + if (options === void 0) { options = {}; } + this.config = { + range: (typeof options.range === 'boolean') && options.range, + loc: (typeof options.loc === 'boolean') && options.loc, + source: null, + tokens: (typeof options.tokens === 'boolean') && options.tokens, + comment: (typeof options.comment === 'boolean') && options.comment, + tolerant: (typeof options.tolerant === 'boolean') && options.tolerant + }; + if (this.config.loc && options.source && options.source !== null) { + this.config.source = String(options.source); + } + this.delegate = delegate; + this.errorHandler = new error_handler_1.ErrorHandler(); + this.errorHandler.tolerant = this.config.tolerant; + this.scanner = new scanner_1.Scanner(code, this.errorHandler); + this.scanner.trackComment = this.config.comment; + this.operatorPrecedence = { + ')': 0, + ';': 0, + ',': 0, + '=': 0, + ']': 0, + '||': 1, + '&&': 2, + '|': 3, + '^': 4, + '&': 5, + '==': 6, + '!=': 6, + '===': 6, + '!==': 6, + '<': 7, + '>': 7, + '<=': 7, + '>=': 7, + '<<': 8, + '>>': 8, + '>>>': 8, + '+': 9, + '-': 9, + '*': 11, + '/': 11, + '%': 11 + }; + this.lookahead = { + type: 2 /* EOF */, + value: '', + lineNumber: this.scanner.lineNumber, + lineStart: 0, + start: 0, + end: 0 + }; + this.hasLineTerminator = false; + this.context = { + isModule: false, + await: false, + allowIn: true, + allowStrictDirective: true, + allowYield: true, + firstCoverInitializedNameError: null, + isAssignmentTarget: false, + isBindingElement: false, + inFunctionBody: false, + inIteration: false, + inSwitch: false, + labelSet: {}, + strict: false + }; + this.tokens = []; + this.startMarker = { + index: 0, + line: this.scanner.lineNumber, + column: 0 + }; + this.lastMarker = { + index: 0, + line: this.scanner.lineNumber, + column: 0 + }; + this.nextToken(); + this.lastMarker = { + index: this.scanner.index, + line: this.scanner.lineNumber, + column: this.scanner.index - this.scanner.lineStart + }; + } + Parser.prototype.throwError = function (messageFormat) { + var values = []; + for (var _i = 1; _i < arguments.length; _i++) { + values[_i - 1] = arguments[_i]; + } + var args = Array.prototype.slice.call(arguments, 1); + var msg = messageFormat.replace(/%(\d)/g, 
function (whole, idx) { + assert_1.assert(idx < args.length, 'Message reference must be in range'); + return args[idx]; + }); + var index = this.lastMarker.index; + var line = this.lastMarker.line; + var column = this.lastMarker.column + 1; + throw this.errorHandler.createError(index, line, column, msg); + }; + Parser.prototype.tolerateError = function (messageFormat) { + var values = []; + for (var _i = 1; _i < arguments.length; _i++) { + values[_i - 1] = arguments[_i]; + } + var args = Array.prototype.slice.call(arguments, 1); + var msg = messageFormat.replace(/%(\d)/g, function (whole, idx) { + assert_1.assert(idx < args.length, 'Message reference must be in range'); + return args[idx]; + }); + var index = this.lastMarker.index; + var line = this.scanner.lineNumber; + var column = this.lastMarker.column + 1; + this.errorHandler.tolerateError(index, line, column, msg); + }; + // Throw an exception because of the token. + Parser.prototype.unexpectedTokenError = function (token, message) { + var msg = message || messages_1.Messages.UnexpectedToken; + var value; + if (token) { + if (!message) { + msg = (token.type === 2 /* EOF */) ? messages_1.Messages.UnexpectedEOS : + (token.type === 3 /* Identifier */) ? messages_1.Messages.UnexpectedIdentifier : + (token.type === 6 /* NumericLiteral */) ? messages_1.Messages.UnexpectedNumber : + (token.type === 8 /* StringLiteral */) ? messages_1.Messages.UnexpectedString : + (token.type === 10 /* Template */) ? messages_1.Messages.UnexpectedTemplate : + messages_1.Messages.UnexpectedToken; + if (token.type === 4 /* Keyword */) { + if (this.scanner.isFutureReservedWord(token.value)) { + msg = messages_1.Messages.UnexpectedReserved; + } + else if (this.context.strict && this.scanner.isStrictModeReservedWord(token.value)) { + msg = messages_1.Messages.StrictReservedWord; + } + } + } + value = token.value; + } + else { + value = 'ILLEGAL'; + } + msg = msg.replace('%0', value); + if (token && typeof token.lineNumber === 'number') { + var index = token.start; + var line = token.lineNumber; + var lastMarkerLineStart = this.lastMarker.index - this.lastMarker.column; + var column = token.start - lastMarkerLineStart + 1; + return this.errorHandler.createError(index, line, column, msg); + } + else { + var index = this.lastMarker.index; + var line = this.lastMarker.line; + var column = this.lastMarker.column + 1; + return this.errorHandler.createError(index, line, column, msg); + } + }; + Parser.prototype.throwUnexpectedToken = function (token, message) { + throw this.unexpectedTokenError(token, message); + }; + Parser.prototype.tolerateUnexpectedToken = function (token, message) { + this.errorHandler.tolerate(this.unexpectedTokenError(token, message)); + }; + Parser.prototype.collectComments = function () { + if (!this.config.comment) { + this.scanner.scanComments(); + } + else { + var comments = this.scanner.scanComments(); + if (comments.length > 0 && this.delegate) { + for (var i = 0; i < comments.length; ++i) { + var e = comments[i]; + var node = void 0; + node = { + type: e.multiLine ? 
'BlockComment' : 'LineComment', + value: this.scanner.source.slice(e.slice[0], e.slice[1]) + }; + if (this.config.range) { + node.range = e.range; + } + if (this.config.loc) { + node.loc = e.loc; + } + var metadata = { + start: { + line: e.loc.start.line, + column: e.loc.start.column, + offset: e.range[0] + }, + end: { + line: e.loc.end.line, + column: e.loc.end.column, + offset: e.range[1] + } + }; + this.delegate(node, metadata); + } + } + } + }; + // From internal representation to an external structure + Parser.prototype.getTokenRaw = function (token) { + return this.scanner.source.slice(token.start, token.end); + }; + Parser.prototype.convertToken = function (token) { + var t = { + type: token_1.TokenName[token.type], + value: this.getTokenRaw(token) + }; + if (this.config.range) { + t.range = [token.start, token.end]; + } + if (this.config.loc) { + t.loc = { + start: { + line: this.startMarker.line, + column: this.startMarker.column + }, + end: { + line: this.scanner.lineNumber, + column: this.scanner.index - this.scanner.lineStart + } + }; + } + if (token.type === 9 /* RegularExpression */) { + var pattern = token.pattern; + var flags = token.flags; + t.regex = { pattern: pattern, flags: flags }; + } + return t; + }; + Parser.prototype.nextToken = function () { + var token = this.lookahead; + this.lastMarker.index = this.scanner.index; + this.lastMarker.line = this.scanner.lineNumber; + this.lastMarker.column = this.scanner.index - this.scanner.lineStart; + this.collectComments(); + if (this.scanner.index !== this.startMarker.index) { + this.startMarker.index = this.scanner.index; + this.startMarker.line = this.scanner.lineNumber; + this.startMarker.column = this.scanner.index - this.scanner.lineStart; + } + var next = this.scanner.lex(); + this.hasLineTerminator = (token.lineNumber !== next.lineNumber); + if (next && this.context.strict && next.type === 3 /* Identifier */) { + if (this.scanner.isStrictModeReservedWord(next.value)) { + next.type = 4 /* Keyword */; + } + } + this.lookahead = next; + if (this.config.tokens && next.type !== 2 /* EOF */) { + this.tokens.push(this.convertToken(next)); + } + return token; + }; + Parser.prototype.nextRegexToken = function () { + this.collectComments(); + var token = this.scanner.scanRegExp(); + if (this.config.tokens) { + // Pop the previous token, '/' or '/=' + // This is added from the lookahead token. + this.tokens.pop(); + this.tokens.push(this.convertToken(token)); + } + // Prime the next lookahead. 
+ this.lookahead = token; + this.nextToken(); + return token; + }; + Parser.prototype.createNode = function () { + return { + index: this.startMarker.index, + line: this.startMarker.line, + column: this.startMarker.column + }; + }; + Parser.prototype.startNode = function (token, lastLineStart) { + if (lastLineStart === void 0) { lastLineStart = 0; } + var column = token.start - token.lineStart; + var line = token.lineNumber; + if (column < 0) { + column += lastLineStart; + line--; + } + return { + index: token.start, + line: line, + column: column + }; + }; + Parser.prototype.finalize = function (marker, node) { + if (this.config.range) { + node.range = [marker.index, this.lastMarker.index]; + } + if (this.config.loc) { + node.loc = { + start: { + line: marker.line, + column: marker.column, + }, + end: { + line: this.lastMarker.line, + column: this.lastMarker.column + } + }; + if (this.config.source) { + node.loc.source = this.config.source; + } + } + if (this.delegate) { + var metadata = { + start: { + line: marker.line, + column: marker.column, + offset: marker.index + }, + end: { + line: this.lastMarker.line, + column: this.lastMarker.column, + offset: this.lastMarker.index + } + }; + this.delegate(node, metadata); + } + return node; + }; + // Expect the next token to match the specified punctuator. + // If not, an exception will be thrown. + Parser.prototype.expect = function (value) { + var token = this.nextToken(); + if (token.type !== 7 /* Punctuator */ || token.value !== value) { + this.throwUnexpectedToken(token); + } + }; + // Quietly expect a comma when in tolerant mode, otherwise delegates to expect(). + Parser.prototype.expectCommaSeparator = function () { + if (this.config.tolerant) { + var token = this.lookahead; + if (token.type === 7 /* Punctuator */ && token.value === ',') { + this.nextToken(); + } + else if (token.type === 7 /* Punctuator */ && token.value === ';') { + this.nextToken(); + this.tolerateUnexpectedToken(token); + } + else { + this.tolerateUnexpectedToken(token, messages_1.Messages.UnexpectedToken); + } + } + else { + this.expect(','); + } + }; + // Expect the next token to match the specified keyword. + // If not, an exception will be thrown. + Parser.prototype.expectKeyword = function (keyword) { + var token = this.nextToken(); + if (token.type !== 4 /* Keyword */ || token.value !== keyword) { + this.throwUnexpectedToken(token); + } + }; + // Return true if the next token matches the specified punctuator. 
+ Parser.prototype.match = function (value) { + return this.lookahead.type === 7 /* Punctuator */ && this.lookahead.value === value; + }; + // Return true if the next token matches the specified keyword + Parser.prototype.matchKeyword = function (keyword) { + return this.lookahead.type === 4 /* Keyword */ && this.lookahead.value === keyword; + }; + // Return true if the next token matches the specified contextual keyword + // (where an identifier is sometimes a keyword depending on the context) + Parser.prototype.matchContextualKeyword = function (keyword) { + return this.lookahead.type === 3 /* Identifier */ && this.lookahead.value === keyword; + }; + // Return true if the next token is an assignment operator + Parser.prototype.matchAssign = function () { + if (this.lookahead.type !== 7 /* Punctuator */) { + return false; + } + var op = this.lookahead.value; + return op === '=' || + op === '*=' || + op === '**=' || + op === '/=' || + op === '%=' || + op === '+=' || + op === '-=' || + op === '<<=' || + op === '>>=' || + op === '>>>=' || + op === '&=' || + op === '^=' || + op === '|='; + }; + // Cover grammar support. + // + // When an assignment expression position starts with an left parenthesis, the determination of the type + // of the syntax is to be deferred arbitrarily long until the end of the parentheses pair (plus a lookahead) + // or the first comma. This situation also defers the determination of all the expressions nested in the pair. + // + // There are three productions that can be parsed in a parentheses pair that needs to be determined + // after the outermost pair is closed. They are: + // + // 1. AssignmentExpression + // 2. BindingElements + // 3. AssignmentTargets + // + // In order to avoid exponential backtracking, we use two flags to denote if the production can be + // binding element or assignment target. + // + // The three productions have the relationship: + // + // BindingElements ⊆ AssignmentTargets ⊆ AssignmentExpression + // + // with a single exception that CoverInitializedName when used directly in an Expression, generates + // an early error. Therefore, we need the third state, firstCoverInitializedNameError, to track the + // first usage of CoverInitializedName and report it when we reached the end of the parentheses pair. + // + // isolateCoverGrammar function runs the given parser function with a new cover grammar context, and it does not + // effect the current flags. This means the production the parser parses is only used as an expression. Therefore + // the CoverInitializedName check is conducted. + // + // inheritCoverGrammar function runs the given parse function with a new cover grammar context, and it propagates + // the flags outside of the parser. This means the production the parser parses is used as a part of a potential + // pattern. The CoverInitializedName check is deferred. 
+ Parser.prototype.isolateCoverGrammar = function (parseFunction) { + var previousIsBindingElement = this.context.isBindingElement; + var previousIsAssignmentTarget = this.context.isAssignmentTarget; + var previousFirstCoverInitializedNameError = this.context.firstCoverInitializedNameError; + this.context.isBindingElement = true; + this.context.isAssignmentTarget = true; + this.context.firstCoverInitializedNameError = null; + var result = parseFunction.call(this); + if (this.context.firstCoverInitializedNameError !== null) { + this.throwUnexpectedToken(this.context.firstCoverInitializedNameError); + } + this.context.isBindingElement = previousIsBindingElement; + this.context.isAssignmentTarget = previousIsAssignmentTarget; + this.context.firstCoverInitializedNameError = previousFirstCoverInitializedNameError; + return result; + }; + Parser.prototype.inheritCoverGrammar = function (parseFunction) { + var previousIsBindingElement = this.context.isBindingElement; + var previousIsAssignmentTarget = this.context.isAssignmentTarget; + var previousFirstCoverInitializedNameError = this.context.firstCoverInitializedNameError; + this.context.isBindingElement = true; + this.context.isAssignmentTarget = true; + this.context.firstCoverInitializedNameError = null; + var result = parseFunction.call(this); + this.context.isBindingElement = this.context.isBindingElement && previousIsBindingElement; + this.context.isAssignmentTarget = this.context.isAssignmentTarget && previousIsAssignmentTarget; + this.context.firstCoverInitializedNameError = previousFirstCoverInitializedNameError || this.context.firstCoverInitializedNameError; + return result; + }; + Parser.prototype.consumeSemicolon = function () { + if (this.match(';')) { + this.nextToken(); + } + else if (!this.hasLineTerminator) { + if (this.lookahead.type !== 2 /* EOF */ && !this.match('}')) { + this.throwUnexpectedToken(this.lookahead); + } + this.lastMarker.index = this.startMarker.index; + this.lastMarker.line = this.startMarker.line; + this.lastMarker.column = this.startMarker.column; + } + }; + // https://tc39.github.io/ecma262/#sec-primary-expression + Parser.prototype.parsePrimaryExpression = function () { + var node = this.createNode(); + var expr; + var token, raw; + switch (this.lookahead.type) { + case 3 /* Identifier */: + if ((this.context.isModule || this.context.await) && this.lookahead.value === 'await') { + this.tolerateUnexpectedToken(this.lookahead); + } + expr = this.matchAsyncFunction() ? 
this.parseFunctionExpression() : this.finalize(node, new Node.Identifier(this.nextToken().value)); + break; + case 6 /* NumericLiteral */: + case 8 /* StringLiteral */: + if (this.context.strict && this.lookahead.octal) { + this.tolerateUnexpectedToken(this.lookahead, messages_1.Messages.StrictOctalLiteral); + } + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + token = this.nextToken(); + raw = this.getTokenRaw(token); + expr = this.finalize(node, new Node.Literal(token.value, raw)); + break; + case 1 /* BooleanLiteral */: + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + token = this.nextToken(); + raw = this.getTokenRaw(token); + expr = this.finalize(node, new Node.Literal(token.value === 'true', raw)); + break; + case 5 /* NullLiteral */: + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + token = this.nextToken(); + raw = this.getTokenRaw(token); + expr = this.finalize(node, new Node.Literal(null, raw)); + break; + case 10 /* Template */: + expr = this.parseTemplateLiteral(); + break; + case 7 /* Punctuator */: + switch (this.lookahead.value) { + case '(': + this.context.isBindingElement = false; + expr = this.inheritCoverGrammar(this.parseGroupExpression); + break; + case '[': + expr = this.inheritCoverGrammar(this.parseArrayInitializer); + break; + case '{': + expr = this.inheritCoverGrammar(this.parseObjectInitializer); + break; + case '/': + case '/=': + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + this.scanner.index = this.startMarker.index; + token = this.nextRegexToken(); + raw = this.getTokenRaw(token); + expr = this.finalize(node, new Node.RegexLiteral(token.regex, raw, token.pattern, token.flags)); + break; + default: + expr = this.throwUnexpectedToken(this.nextToken()); + } + break; + case 4 /* Keyword */: + if (!this.context.strict && this.context.allowYield && this.matchKeyword('yield')) { + expr = this.parseIdentifierName(); + } + else if (!this.context.strict && this.matchKeyword('let')) { + expr = this.finalize(node, new Node.Identifier(this.nextToken().value)); + } + else { + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + if (this.matchKeyword('function')) { + expr = this.parseFunctionExpression(); + } + else if (this.matchKeyword('this')) { + this.nextToken(); + expr = this.finalize(node, new Node.ThisExpression()); + } + else if (this.matchKeyword('class')) { + expr = this.parseClassExpression(); + } + else { + expr = this.throwUnexpectedToken(this.nextToken()); + } + } + break; + default: + expr = this.throwUnexpectedToken(this.nextToken()); + } + return expr; + }; + // https://tc39.github.io/ecma262/#sec-array-initializer + Parser.prototype.parseSpreadElement = function () { + var node = this.createNode(); + this.expect('...'); + var arg = this.inheritCoverGrammar(this.parseAssignmentExpression); + return this.finalize(node, new Node.SpreadElement(arg)); + }; + Parser.prototype.parseArrayInitializer = function () { + var node = this.createNode(); + var elements = []; + this.expect('['); + while (!this.match(']')) { + if (this.match(',')) { + this.nextToken(); + elements.push(null); + } + else if (this.match('...')) { + var element = this.parseSpreadElement(); + if (!this.match(']')) { + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + this.expect(','); + } + elements.push(element); + } + else { + 
elements.push(this.inheritCoverGrammar(this.parseAssignmentExpression)); + if (!this.match(']')) { + this.expect(','); + } + } + } + this.expect(']'); + return this.finalize(node, new Node.ArrayExpression(elements)); + }; + // https://tc39.github.io/ecma262/#sec-object-initializer + Parser.prototype.parsePropertyMethod = function (params) { + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + var previousStrict = this.context.strict; + var previousAllowStrictDirective = this.context.allowStrictDirective; + this.context.allowStrictDirective = params.simple; + var body = this.isolateCoverGrammar(this.parseFunctionSourceElements); + if (this.context.strict && params.firstRestricted) { + this.tolerateUnexpectedToken(params.firstRestricted, params.message); + } + if (this.context.strict && params.stricted) { + this.tolerateUnexpectedToken(params.stricted, params.message); + } + this.context.strict = previousStrict; + this.context.allowStrictDirective = previousAllowStrictDirective; + return body; + }; + Parser.prototype.parsePropertyMethodFunction = function () { + var isGenerator = false; + var node = this.createNode(); + var previousAllowYield = this.context.allowYield; + this.context.allowYield = true; + var params = this.parseFormalParameters(); + var method = this.parsePropertyMethod(params); + this.context.allowYield = previousAllowYield; + return this.finalize(node, new Node.FunctionExpression(null, params.params, method, isGenerator)); + }; + Parser.prototype.parsePropertyMethodAsyncFunction = function () { + var node = this.createNode(); + var previousAllowYield = this.context.allowYield; + var previousAwait = this.context.await; + this.context.allowYield = false; + this.context.await = true; + var params = this.parseFormalParameters(); + var method = this.parsePropertyMethod(params); + this.context.allowYield = previousAllowYield; + this.context.await = previousAwait; + return this.finalize(node, new Node.AsyncFunctionExpression(null, params.params, method)); + }; + Parser.prototype.parseObjectPropertyKey = function () { + var node = this.createNode(); + var token = this.nextToken(); + var key; + switch (token.type) { + case 8 /* StringLiteral */: + case 6 /* NumericLiteral */: + if (this.context.strict && token.octal) { + this.tolerateUnexpectedToken(token, messages_1.Messages.StrictOctalLiteral); + } + var raw = this.getTokenRaw(token); + key = this.finalize(node, new Node.Literal(token.value, raw)); + break; + case 3 /* Identifier */: + case 1 /* BooleanLiteral */: + case 5 /* NullLiteral */: + case 4 /* Keyword */: + key = this.finalize(node, new Node.Identifier(token.value)); + break; + case 7 /* Punctuator */: + if (token.value === '[') { + key = this.isolateCoverGrammar(this.parseAssignmentExpression); + this.expect(']'); + } + else { + key = this.throwUnexpectedToken(token); + } + break; + default: + key = this.throwUnexpectedToken(token); + } + return key; + }; + Parser.prototype.isPropertyKey = function (key, value) { + return (key.type === syntax_1.Syntax.Identifier && key.name === value) || + (key.type === syntax_1.Syntax.Literal && key.value === value); + }; + Parser.prototype.parseObjectProperty = function (hasProto) { + var node = this.createNode(); + var token = this.lookahead; + var kind; + var key = null; + var value = null; + var computed = false; + var method = false; + var shorthand = false; + var isAsync = false; + if (token.type === 3 /* Identifier */) { + var id = token.value; + this.nextToken(); + computed = this.match('['); + 
isAsync = !this.hasLineTerminator && (id === 'async') && + !this.match(':') && !this.match('(') && !this.match('*') && !this.match(','); + key = isAsync ? this.parseObjectPropertyKey() : this.finalize(node, new Node.Identifier(id)); + } + else if (this.match('*')) { + this.nextToken(); + } + else { + computed = this.match('['); + key = this.parseObjectPropertyKey(); + } + var lookaheadPropertyKey = this.qualifiedPropertyName(this.lookahead); + if (token.type === 3 /* Identifier */ && !isAsync && token.value === 'get' && lookaheadPropertyKey) { + kind = 'get'; + computed = this.match('['); + key = this.parseObjectPropertyKey(); + this.context.allowYield = false; + value = this.parseGetterMethod(); + } + else if (token.type === 3 /* Identifier */ && !isAsync && token.value === 'set' && lookaheadPropertyKey) { + kind = 'set'; + computed = this.match('['); + key = this.parseObjectPropertyKey(); + value = this.parseSetterMethod(); + } + else if (token.type === 7 /* Punctuator */ && token.value === '*' && lookaheadPropertyKey) { + kind = 'init'; + computed = this.match('['); + key = this.parseObjectPropertyKey(); + value = this.parseGeneratorMethod(); + method = true; + } + else { + if (!key) { + this.throwUnexpectedToken(this.lookahead); + } + kind = 'init'; + if (this.match(':') && !isAsync) { + if (!computed && this.isPropertyKey(key, '__proto__')) { + if (hasProto.value) { + this.tolerateError(messages_1.Messages.DuplicateProtoProperty); + } + hasProto.value = true; + } + this.nextToken(); + value = this.inheritCoverGrammar(this.parseAssignmentExpression); + } + else if (this.match('(')) { + value = isAsync ? this.parsePropertyMethodAsyncFunction() : this.parsePropertyMethodFunction(); + method = true; + } + else if (token.type === 3 /* Identifier */) { + var id = this.finalize(node, new Node.Identifier(token.value)); + if (this.match('=')) { + this.context.firstCoverInitializedNameError = this.lookahead; + this.nextToken(); + shorthand = true; + var init = this.isolateCoverGrammar(this.parseAssignmentExpression); + value = this.finalize(node, new Node.AssignmentPattern(id, init)); + } + else { + shorthand = true; + value = id; + } + } + else { + this.throwUnexpectedToken(this.nextToken()); + } + } + return this.finalize(node, new Node.Property(kind, key, computed, value, method, shorthand)); + }; + Parser.prototype.parseObjectInitializer = function () { + var node = this.createNode(); + this.expect('{'); + var properties = []; + var hasProto = { value: false }; + while (!this.match('}')) { + properties.push(this.parseObjectProperty(hasProto)); + if (!this.match('}')) { + this.expectCommaSeparator(); + } + } + this.expect('}'); + return this.finalize(node, new Node.ObjectExpression(properties)); + }; + // https://tc39.github.io/ecma262/#sec-template-literals + Parser.prototype.parseTemplateHead = function () { + assert_1.assert(this.lookahead.head, 'Template literal must start with a template head'); + var node = this.createNode(); + var token = this.nextToken(); + var raw = token.value; + var cooked = token.cooked; + return this.finalize(node, new Node.TemplateElement({ raw: raw, cooked: cooked }, token.tail)); + }; + Parser.prototype.parseTemplateElement = function () { + if (this.lookahead.type !== 10 /* Template */) { + this.throwUnexpectedToken(); + } + var node = this.createNode(); + var token = this.nextToken(); + var raw = token.value; + var cooked = token.cooked; + return this.finalize(node, new Node.TemplateElement({ raw: raw, cooked: cooked }, token.tail)); + }; + 
Parser.prototype.parseTemplateLiteral = function () { + var node = this.createNode(); + var expressions = []; + var quasis = []; + var quasi = this.parseTemplateHead(); + quasis.push(quasi); + while (!quasi.tail) { + expressions.push(this.parseExpression()); + quasi = this.parseTemplateElement(); + quasis.push(quasi); + } + return this.finalize(node, new Node.TemplateLiteral(quasis, expressions)); + }; + // https://tc39.github.io/ecma262/#sec-grouping-operator + Parser.prototype.reinterpretExpressionAsPattern = function (expr) { + switch (expr.type) { + case syntax_1.Syntax.Identifier: + case syntax_1.Syntax.MemberExpression: + case syntax_1.Syntax.RestElement: + case syntax_1.Syntax.AssignmentPattern: + break; + case syntax_1.Syntax.SpreadElement: + expr.type = syntax_1.Syntax.RestElement; + this.reinterpretExpressionAsPattern(expr.argument); + break; + case syntax_1.Syntax.ArrayExpression: + expr.type = syntax_1.Syntax.ArrayPattern; + for (var i = 0; i < expr.elements.length; i++) { + if (expr.elements[i] !== null) { + this.reinterpretExpressionAsPattern(expr.elements[i]); + } + } + break; + case syntax_1.Syntax.ObjectExpression: + expr.type = syntax_1.Syntax.ObjectPattern; + for (var i = 0; i < expr.properties.length; i++) { + this.reinterpretExpressionAsPattern(expr.properties[i].value); + } + break; + case syntax_1.Syntax.AssignmentExpression: + expr.type = syntax_1.Syntax.AssignmentPattern; + delete expr.operator; + this.reinterpretExpressionAsPattern(expr.left); + break; + default: + // Allow other node type for tolerant parsing. + break; + } + }; + Parser.prototype.parseGroupExpression = function () { + var expr; + this.expect('('); + if (this.match(')')) { + this.nextToken(); + if (!this.match('=>')) { + this.expect('=>'); + } + expr = { + type: ArrowParameterPlaceHolder, + params: [], + async: false + }; + } + else { + var startToken = this.lookahead; + var params = []; + if (this.match('...')) { + expr = this.parseRestElement(params); + this.expect(')'); + if (!this.match('=>')) { + this.expect('=>'); + } + expr = { + type: ArrowParameterPlaceHolder, + params: [expr], + async: false + }; + } + else { + var arrow = false; + this.context.isBindingElement = true; + expr = this.inheritCoverGrammar(this.parseAssignmentExpression); + if (this.match(',')) { + var expressions = []; + this.context.isAssignmentTarget = false; + expressions.push(expr); + while (this.lookahead.type !== 2 /* EOF */) { + if (!this.match(',')) { + break; + } + this.nextToken(); + if (this.match(')')) { + this.nextToken(); + for (var i = 0; i < expressions.length; i++) { + this.reinterpretExpressionAsPattern(expressions[i]); + } + arrow = true; + expr = { + type: ArrowParameterPlaceHolder, + params: expressions, + async: false + }; + } + else if (this.match('...')) { + if (!this.context.isBindingElement) { + this.throwUnexpectedToken(this.lookahead); + } + expressions.push(this.parseRestElement(params)); + this.expect(')'); + if (!this.match('=>')) { + this.expect('=>'); + } + this.context.isBindingElement = false; + for (var i = 0; i < expressions.length; i++) { + this.reinterpretExpressionAsPattern(expressions[i]); + } + arrow = true; + expr = { + type: ArrowParameterPlaceHolder, + params: expressions, + async: false + }; + } + else { + expressions.push(this.inheritCoverGrammar(this.parseAssignmentExpression)); + } + if (arrow) { + break; + } + } + if (!arrow) { + expr = this.finalize(this.startNode(startToken), new Node.SequenceExpression(expressions)); + } + } + if (!arrow) { + this.expect(')'); + if 
(this.match('=>')) { + if (expr.type === syntax_1.Syntax.Identifier && expr.name === 'yield') { + arrow = true; + expr = { + type: ArrowParameterPlaceHolder, + params: [expr], + async: false + }; + } + if (!arrow) { + if (!this.context.isBindingElement) { + this.throwUnexpectedToken(this.lookahead); + } + if (expr.type === syntax_1.Syntax.SequenceExpression) { + for (var i = 0; i < expr.expressions.length; i++) { + this.reinterpretExpressionAsPattern(expr.expressions[i]); + } + } + else { + this.reinterpretExpressionAsPattern(expr); + } + var parameters = (expr.type === syntax_1.Syntax.SequenceExpression ? expr.expressions : [expr]); + expr = { + type: ArrowParameterPlaceHolder, + params: parameters, + async: false + }; + } + } + this.context.isBindingElement = false; + } + } + } + return expr; + }; + // https://tc39.github.io/ecma262/#sec-left-hand-side-expressions + Parser.prototype.parseArguments = function () { + this.expect('('); + var args = []; + if (!this.match(')')) { + while (true) { + var expr = this.match('...') ? this.parseSpreadElement() : + this.isolateCoverGrammar(this.parseAssignmentExpression); + args.push(expr); + if (this.match(')')) { + break; + } + this.expectCommaSeparator(); + if (this.match(')')) { + break; + } + } + } + this.expect(')'); + return args; + }; + Parser.prototype.isIdentifierName = function (token) { + return token.type === 3 /* Identifier */ || + token.type === 4 /* Keyword */ || + token.type === 1 /* BooleanLiteral */ || + token.type === 5 /* NullLiteral */; + }; + Parser.prototype.parseIdentifierName = function () { + var node = this.createNode(); + var token = this.nextToken(); + if (!this.isIdentifierName(token)) { + this.throwUnexpectedToken(token); + } + return this.finalize(node, new Node.Identifier(token.value)); + }; + Parser.prototype.parseNewExpression = function () { + var node = this.createNode(); + var id = this.parseIdentifierName(); + assert_1.assert(id.name === 'new', 'New expression must start with `new`'); + var expr; + if (this.match('.')) { + this.nextToken(); + if (this.lookahead.type === 3 /* Identifier */ && this.context.inFunctionBody && this.lookahead.value === 'target') { + var property = this.parseIdentifierName(); + expr = new Node.MetaProperty(id, property); + } + else { + this.throwUnexpectedToken(this.lookahead); + } + } + else { + var callee = this.isolateCoverGrammar(this.parseLeftHandSideExpression); + var args = this.match('(') ? this.parseArguments() : []; + expr = new Node.NewExpression(callee, args); + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + } + return this.finalize(node, expr); + }; + Parser.prototype.parseAsyncArgument = function () { + var arg = this.parseAssignmentExpression(); + this.context.firstCoverInitializedNameError = null; + return arg; + }; + Parser.prototype.parseAsyncArguments = function () { + this.expect('('); + var args = []; + if (!this.match(')')) { + while (true) { + var expr = this.match('...') ? 
this.parseSpreadElement() : + this.isolateCoverGrammar(this.parseAsyncArgument); + args.push(expr); + if (this.match(')')) { + break; + } + this.expectCommaSeparator(); + if (this.match(')')) { + break; + } + } + } + this.expect(')'); + return args; + }; + Parser.prototype.parseLeftHandSideExpressionAllowCall = function () { + var startToken = this.lookahead; + var maybeAsync = this.matchContextualKeyword('async'); + var previousAllowIn = this.context.allowIn; + this.context.allowIn = true; + var expr; + if (this.matchKeyword('super') && this.context.inFunctionBody) { + expr = this.createNode(); + this.nextToken(); + expr = this.finalize(expr, new Node.Super()); + if (!this.match('(') && !this.match('.') && !this.match('[')) { + this.throwUnexpectedToken(this.lookahead); + } + } + else { + expr = this.inheritCoverGrammar(this.matchKeyword('new') ? this.parseNewExpression : this.parsePrimaryExpression); + } + while (true) { + if (this.match('.')) { + this.context.isBindingElement = false; + this.context.isAssignmentTarget = true; + this.expect('.'); + var property = this.parseIdentifierName(); + expr = this.finalize(this.startNode(startToken), new Node.StaticMemberExpression(expr, property)); + } + else if (this.match('(')) { + var asyncArrow = maybeAsync && (startToken.lineNumber === this.lookahead.lineNumber); + this.context.isBindingElement = false; + this.context.isAssignmentTarget = false; + var args = asyncArrow ? this.parseAsyncArguments() : this.parseArguments(); + expr = this.finalize(this.startNode(startToken), new Node.CallExpression(expr, args)); + if (asyncArrow && this.match('=>')) { + for (var i = 0; i < args.length; ++i) { + this.reinterpretExpressionAsPattern(args[i]); + } + expr = { + type: ArrowParameterPlaceHolder, + params: args, + async: true + }; + } + } + else if (this.match('[')) { + this.context.isBindingElement = false; + this.context.isAssignmentTarget = true; + this.expect('['); + var property = this.isolateCoverGrammar(this.parseExpression); + this.expect(']'); + expr = this.finalize(this.startNode(startToken), new Node.ComputedMemberExpression(expr, property)); + } + else if (this.lookahead.type === 10 /* Template */ && this.lookahead.head) { + var quasi = this.parseTemplateLiteral(); + expr = this.finalize(this.startNode(startToken), new Node.TaggedTemplateExpression(expr, quasi)); + } + else { + break; + } + } + this.context.allowIn = previousAllowIn; + return expr; + }; + Parser.prototype.parseSuper = function () { + var node = this.createNode(); + this.expectKeyword('super'); + if (!this.match('[') && !this.match('.')) { + this.throwUnexpectedToken(this.lookahead); + } + return this.finalize(node, new Node.Super()); + }; + Parser.prototype.parseLeftHandSideExpression = function () { + assert_1.assert(this.context.allowIn, 'callee of new expression always allow in keyword.'); + var node = this.startNode(this.lookahead); + var expr = (this.matchKeyword('super') && this.context.inFunctionBody) ? this.parseSuper() : + this.inheritCoverGrammar(this.matchKeyword('new') ? 
this.parseNewExpression : this.parsePrimaryExpression); + while (true) { + if (this.match('[')) { + this.context.isBindingElement = false; + this.context.isAssignmentTarget = true; + this.expect('['); + var property = this.isolateCoverGrammar(this.parseExpression); + this.expect(']'); + expr = this.finalize(node, new Node.ComputedMemberExpression(expr, property)); + } + else if (this.match('.')) { + this.context.isBindingElement = false; + this.context.isAssignmentTarget = true; + this.expect('.'); + var property = this.parseIdentifierName(); + expr = this.finalize(node, new Node.StaticMemberExpression(expr, property)); + } + else if (this.lookahead.type === 10 /* Template */ && this.lookahead.head) { + var quasi = this.parseTemplateLiteral(); + expr = this.finalize(node, new Node.TaggedTemplateExpression(expr, quasi)); + } + else { + break; + } + } + return expr; + }; + // https://tc39.github.io/ecma262/#sec-update-expressions + Parser.prototype.parseUpdateExpression = function () { + var expr; + var startToken = this.lookahead; + if (this.match('++') || this.match('--')) { + var node = this.startNode(startToken); + var token = this.nextToken(); + expr = this.inheritCoverGrammar(this.parseUnaryExpression); + if (this.context.strict && expr.type === syntax_1.Syntax.Identifier && this.scanner.isRestrictedWord(expr.name)) { + this.tolerateError(messages_1.Messages.StrictLHSPrefix); + } + if (!this.context.isAssignmentTarget) { + this.tolerateError(messages_1.Messages.InvalidLHSInAssignment); + } + var prefix = true; + expr = this.finalize(node, new Node.UpdateExpression(token.value, expr, prefix)); + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + } + else { + expr = this.inheritCoverGrammar(this.parseLeftHandSideExpressionAllowCall); + if (!this.hasLineTerminator && this.lookahead.type === 7 /* Punctuator */) { + if (this.match('++') || this.match('--')) { + if (this.context.strict && expr.type === syntax_1.Syntax.Identifier && this.scanner.isRestrictedWord(expr.name)) { + this.tolerateError(messages_1.Messages.StrictLHSPostfix); + } + if (!this.context.isAssignmentTarget) { + this.tolerateError(messages_1.Messages.InvalidLHSInAssignment); + } + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + var operator = this.nextToken().value; + var prefix = false; + expr = this.finalize(this.startNode(startToken), new Node.UpdateExpression(operator, expr, prefix)); + } + } + } + return expr; + }; + // https://tc39.github.io/ecma262/#sec-unary-operators + Parser.prototype.parseAwaitExpression = function () { + var node = this.createNode(); + this.nextToken(); + var argument = this.parseUnaryExpression(); + return this.finalize(node, new Node.AwaitExpression(argument)); + }; + Parser.prototype.parseUnaryExpression = function () { + var expr; + if (this.match('+') || this.match('-') || this.match('~') || this.match('!') || + this.matchKeyword('delete') || this.matchKeyword('void') || this.matchKeyword('typeof')) { + var node = this.startNode(this.lookahead); + var token = this.nextToken(); + expr = this.inheritCoverGrammar(this.parseUnaryExpression); + expr = this.finalize(node, new Node.UnaryExpression(token.value, expr)); + if (this.context.strict && expr.operator === 'delete' && expr.argument.type === syntax_1.Syntax.Identifier) { + this.tolerateError(messages_1.Messages.StrictDelete); + } + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + } + else if (this.context.await && 
this.matchContextualKeyword('await')) { + expr = this.parseAwaitExpression(); + } + else { + expr = this.parseUpdateExpression(); + } + return expr; + }; + Parser.prototype.parseExponentiationExpression = function () { + var startToken = this.lookahead; + var expr = this.inheritCoverGrammar(this.parseUnaryExpression); + if (expr.type !== syntax_1.Syntax.UnaryExpression && this.match('**')) { + this.nextToken(); + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + var left = expr; + var right = this.isolateCoverGrammar(this.parseExponentiationExpression); + expr = this.finalize(this.startNode(startToken), new Node.BinaryExpression('**', left, right)); + } + return expr; + }; + // https://tc39.github.io/ecma262/#sec-exp-operator + // https://tc39.github.io/ecma262/#sec-multiplicative-operators + // https://tc39.github.io/ecma262/#sec-additive-operators + // https://tc39.github.io/ecma262/#sec-bitwise-shift-operators + // https://tc39.github.io/ecma262/#sec-relational-operators + // https://tc39.github.io/ecma262/#sec-equality-operators + // https://tc39.github.io/ecma262/#sec-binary-bitwise-operators + // https://tc39.github.io/ecma262/#sec-binary-logical-operators + Parser.prototype.binaryPrecedence = function (token) { + var op = token.value; + var precedence; + if (token.type === 7 /* Punctuator */) { + precedence = this.operatorPrecedence[op] || 0; + } + else if (token.type === 4 /* Keyword */) { + precedence = (op === 'instanceof' || (this.context.allowIn && op === 'in')) ? 7 : 0; + } + else { + precedence = 0; + } + return precedence; + }; + Parser.prototype.parseBinaryExpression = function () { + var startToken = this.lookahead; + var expr = this.inheritCoverGrammar(this.parseExponentiationExpression); + var token = this.lookahead; + var prec = this.binaryPrecedence(token); + if (prec > 0) { + this.nextToken(); + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + var markers = [startToken, this.lookahead]; + var left = expr; + var right = this.isolateCoverGrammar(this.parseExponentiationExpression); + var stack = [left, token.value, right]; + var precedences = [prec]; + while (true) { + prec = this.binaryPrecedence(this.lookahead); + if (prec <= 0) { + break; + } + // Reduce: make a binary expression from the three topmost entries. + while ((stack.length > 2) && (prec <= precedences[precedences.length - 1])) { + right = stack.pop(); + var operator = stack.pop(); + precedences.pop(); + left = stack.pop(); + markers.pop(); + var node = this.startNode(markers[markers.length - 1]); + stack.push(this.finalize(node, new Node.BinaryExpression(operator, left, right))); + } + // Shift. + stack.push(this.nextToken().value); + precedences.push(prec); + markers.push(this.lookahead); + stack.push(this.isolateCoverGrammar(this.parseExponentiationExpression)); + } + // Final reduce to clean-up the stack. 
+ var i = stack.length - 1; + expr = stack[i]; + var lastMarker = markers.pop(); + while (i > 1) { + var marker = markers.pop(); + var lastLineStart = lastMarker && lastMarker.lineStart; + var node = this.startNode(marker, lastLineStart); + var operator = stack[i - 1]; + expr = this.finalize(node, new Node.BinaryExpression(operator, stack[i - 2], expr)); + i -= 2; + lastMarker = marker; + } + } + return expr; + }; + // https://tc39.github.io/ecma262/#sec-conditional-operator + Parser.prototype.parseConditionalExpression = function () { + var startToken = this.lookahead; + var expr = this.inheritCoverGrammar(this.parseBinaryExpression); + if (this.match('?')) { + this.nextToken(); + var previousAllowIn = this.context.allowIn; + this.context.allowIn = true; + var consequent = this.isolateCoverGrammar(this.parseAssignmentExpression); + this.context.allowIn = previousAllowIn; + this.expect(':'); + var alternate = this.isolateCoverGrammar(this.parseAssignmentExpression); + expr = this.finalize(this.startNode(startToken), new Node.ConditionalExpression(expr, consequent, alternate)); + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + } + return expr; + }; + // https://tc39.github.io/ecma262/#sec-assignment-operators + Parser.prototype.checkPatternParam = function (options, param) { + switch (param.type) { + case syntax_1.Syntax.Identifier: + this.validateParam(options, param, param.name); + break; + case syntax_1.Syntax.RestElement: + this.checkPatternParam(options, param.argument); + break; + case syntax_1.Syntax.AssignmentPattern: + this.checkPatternParam(options, param.left); + break; + case syntax_1.Syntax.ArrayPattern: + for (var i = 0; i < param.elements.length; i++) { + if (param.elements[i] !== null) { + this.checkPatternParam(options, param.elements[i]); + } + } + break; + case syntax_1.Syntax.ObjectPattern: + for (var i = 0; i < param.properties.length; i++) { + this.checkPatternParam(options, param.properties[i].value); + } + break; + default: + break; + } + options.simple = options.simple && (param instanceof Node.Identifier); + }; + Parser.prototype.reinterpretAsCoverFormalsList = function (expr) { + var params = [expr]; + var options; + var asyncArrow = false; + switch (expr.type) { + case syntax_1.Syntax.Identifier: + break; + case ArrowParameterPlaceHolder: + params = expr.params; + asyncArrow = expr.async; + break; + default: + return null; + } + options = { + simple: true, + paramSet: {} + }; + for (var i = 0; i < params.length; ++i) { + var param = params[i]; + if (param.type === syntax_1.Syntax.AssignmentPattern) { + if (param.right.type === syntax_1.Syntax.YieldExpression) { + if (param.right.argument) { + this.throwUnexpectedToken(this.lookahead); + } + param.right.type = syntax_1.Syntax.Identifier; + param.right.name = 'yield'; + delete param.right.argument; + delete param.right.delegate; + } + } + else if (asyncArrow && param.type === syntax_1.Syntax.Identifier && param.name === 'await') { + this.throwUnexpectedToken(this.lookahead); + } + this.checkPatternParam(options, param); + params[i] = param; + } + if (this.context.strict || !this.context.allowYield) { + for (var i = 0; i < params.length; ++i) { + var param = params[i]; + if (param.type === syntax_1.Syntax.YieldExpression) { + this.throwUnexpectedToken(this.lookahead); + } + } + } + if (options.message === messages_1.Messages.StrictParamDupe) { + var token = this.context.strict ? 
options.stricted : options.firstRestricted; + this.throwUnexpectedToken(token, options.message); + } + return { + simple: options.simple, + params: params, + stricted: options.stricted, + firstRestricted: options.firstRestricted, + message: options.message + }; + }; + Parser.prototype.parseAssignmentExpression = function () { + var expr; + if (!this.context.allowYield && this.matchKeyword('yield')) { + expr = this.parseYieldExpression(); + } + else { + var startToken = this.lookahead; + var token = startToken; + expr = this.parseConditionalExpression(); + if (token.type === 3 /* Identifier */ && (token.lineNumber === this.lookahead.lineNumber) && token.value === 'async') { + if (this.lookahead.type === 3 /* Identifier */ || this.matchKeyword('yield')) { + var arg = this.parsePrimaryExpression(); + this.reinterpretExpressionAsPattern(arg); + expr = { + type: ArrowParameterPlaceHolder, + params: [arg], + async: true + }; + } + } + if (expr.type === ArrowParameterPlaceHolder || this.match('=>')) { + // https://tc39.github.io/ecma262/#sec-arrow-function-definitions + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + var isAsync = expr.async; + var list = this.reinterpretAsCoverFormalsList(expr); + if (list) { + if (this.hasLineTerminator) { + this.tolerateUnexpectedToken(this.lookahead); + } + this.context.firstCoverInitializedNameError = null; + var previousStrict = this.context.strict; + var previousAllowStrictDirective = this.context.allowStrictDirective; + this.context.allowStrictDirective = list.simple; + var previousAllowYield = this.context.allowYield; + var previousAwait = this.context.await; + this.context.allowYield = true; + this.context.await = isAsync; + var node = this.startNode(startToken); + this.expect('=>'); + var body = void 0; + if (this.match('{')) { + var previousAllowIn = this.context.allowIn; + this.context.allowIn = true; + body = this.parseFunctionSourceElements(); + this.context.allowIn = previousAllowIn; + } + else { + body = this.isolateCoverGrammar(this.parseAssignmentExpression); + } + var expression = body.type !== syntax_1.Syntax.BlockStatement; + if (this.context.strict && list.firstRestricted) { + this.throwUnexpectedToken(list.firstRestricted, list.message); + } + if (this.context.strict && list.stricted) { + this.tolerateUnexpectedToken(list.stricted, list.message); + } + expr = isAsync ? 
this.finalize(node, new Node.AsyncArrowFunctionExpression(list.params, body, expression)) : + this.finalize(node, new Node.ArrowFunctionExpression(list.params, body, expression)); + this.context.strict = previousStrict; + this.context.allowStrictDirective = previousAllowStrictDirective; + this.context.allowYield = previousAllowYield; + this.context.await = previousAwait; + } + } + else { + if (this.matchAssign()) { + if (!this.context.isAssignmentTarget) { + this.tolerateError(messages_1.Messages.InvalidLHSInAssignment); + } + if (this.context.strict && expr.type === syntax_1.Syntax.Identifier) { + var id = expr; + if (this.scanner.isRestrictedWord(id.name)) { + this.tolerateUnexpectedToken(token, messages_1.Messages.StrictLHSAssignment); + } + if (this.scanner.isStrictModeReservedWord(id.name)) { + this.tolerateUnexpectedToken(token, messages_1.Messages.StrictReservedWord); + } + } + if (!this.match('=')) { + this.context.isAssignmentTarget = false; + this.context.isBindingElement = false; + } + else { + this.reinterpretExpressionAsPattern(expr); + } + token = this.nextToken(); + var operator = token.value; + var right = this.isolateCoverGrammar(this.parseAssignmentExpression); + expr = this.finalize(this.startNode(startToken), new Node.AssignmentExpression(operator, expr, right)); + this.context.firstCoverInitializedNameError = null; + } + } + } + return expr; + }; + // https://tc39.github.io/ecma262/#sec-comma-operator + Parser.prototype.parseExpression = function () { + var startToken = this.lookahead; + var expr = this.isolateCoverGrammar(this.parseAssignmentExpression); + if (this.match(',')) { + var expressions = []; + expressions.push(expr); + while (this.lookahead.type !== 2 /* EOF */) { + if (!this.match(',')) { + break; + } + this.nextToken(); + expressions.push(this.isolateCoverGrammar(this.parseAssignmentExpression)); + } + expr = this.finalize(this.startNode(startToken), new Node.SequenceExpression(expressions)); + } + return expr; + }; + // https://tc39.github.io/ecma262/#sec-block + Parser.prototype.parseStatementListItem = function () { + var statement; + this.context.isAssignmentTarget = true; + this.context.isBindingElement = true; + if (this.lookahead.type === 4 /* Keyword */) { + switch (this.lookahead.value) { + case 'export': + if (!this.context.isModule) { + this.tolerateUnexpectedToken(this.lookahead, messages_1.Messages.IllegalExportDeclaration); + } + statement = this.parseExportDeclaration(); + break; + case 'import': + if (!this.context.isModule) { + this.tolerateUnexpectedToken(this.lookahead, messages_1.Messages.IllegalImportDeclaration); + } + statement = this.parseImportDeclaration(); + break; + case 'const': + statement = this.parseLexicalDeclaration({ inFor: false }); + break; + case 'function': + statement = this.parseFunctionDeclaration(); + break; + case 'class': + statement = this.parseClassDeclaration(); + break; + case 'let': + statement = this.isLexicalDeclaration() ? 
this.parseLexicalDeclaration({ inFor: false }) : this.parseStatement(); + break; + default: + statement = this.parseStatement(); + break; + } + } + else { + statement = this.parseStatement(); + } + return statement; + }; + Parser.prototype.parseBlock = function () { + var node = this.createNode(); + this.expect('{'); + var block = []; + while (true) { + if (this.match('}')) { + break; + } + block.push(this.parseStatementListItem()); + } + this.expect('}'); + return this.finalize(node, new Node.BlockStatement(block)); + }; + // https://tc39.github.io/ecma262/#sec-let-and-const-declarations + Parser.prototype.parseLexicalBinding = function (kind, options) { + var node = this.createNode(); + var params = []; + var id = this.parsePattern(params, kind); + if (this.context.strict && id.type === syntax_1.Syntax.Identifier) { + if (this.scanner.isRestrictedWord(id.name)) { + this.tolerateError(messages_1.Messages.StrictVarName); + } + } + var init = null; + if (kind === 'const') { + if (!this.matchKeyword('in') && !this.matchContextualKeyword('of')) { + if (this.match('=')) { + this.nextToken(); + init = this.isolateCoverGrammar(this.parseAssignmentExpression); + } + else { + this.throwError(messages_1.Messages.DeclarationMissingInitializer, 'const'); + } + } + } + else if ((!options.inFor && id.type !== syntax_1.Syntax.Identifier) || this.match('=')) { + this.expect('='); + init = this.isolateCoverGrammar(this.parseAssignmentExpression); + } + return this.finalize(node, new Node.VariableDeclarator(id, init)); + }; + Parser.prototype.parseBindingList = function (kind, options) { + var list = [this.parseLexicalBinding(kind, options)]; + while (this.match(',')) { + this.nextToken(); + list.push(this.parseLexicalBinding(kind, options)); + } + return list; + }; + Parser.prototype.isLexicalDeclaration = function () { + var state = this.scanner.saveState(); + this.scanner.scanComments(); + var next = this.scanner.lex(); + this.scanner.restoreState(state); + return (next.type === 3 /* Identifier */) || + (next.type === 7 /* Punctuator */ && next.value === '[') || + (next.type === 7 /* Punctuator */ && next.value === '{') || + (next.type === 4 /* Keyword */ && next.value === 'let') || + (next.type === 4 /* Keyword */ && next.value === 'yield'); + }; + Parser.prototype.parseLexicalDeclaration = function (options) { + var node = this.createNode(); + var kind = this.nextToken().value; + assert_1.assert(kind === 'let' || kind === 'const', 'Lexical declaration must be either let or const'); + var declarations = this.parseBindingList(kind, options); + this.consumeSemicolon(); + return this.finalize(node, new Node.VariableDeclaration(declarations, kind)); + }; + // https://tc39.github.io/ecma262/#sec-destructuring-binding-patterns + Parser.prototype.parseBindingRestElement = function (params, kind) { + var node = this.createNode(); + this.expect('...'); + var arg = this.parsePattern(params, kind); + return this.finalize(node, new Node.RestElement(arg)); + }; + Parser.prototype.parseArrayPattern = function (params, kind) { + var node = this.createNode(); + this.expect('['); + var elements = []; + while (!this.match(']')) { + if (this.match(',')) { + this.nextToken(); + elements.push(null); + } + else { + if (this.match('...')) { + elements.push(this.parseBindingRestElement(params, kind)); + break; + } + else { + elements.push(this.parsePatternWithDefault(params, kind)); + } + if (!this.match(']')) { + this.expect(','); + } + } + } + this.expect(']'); + return this.finalize(node, new Node.ArrayPattern(elements)); 
+ }; + Parser.prototype.parsePropertyPattern = function (params, kind) { + var node = this.createNode(); + var computed = false; + var shorthand = false; + var method = false; + var key; + var value; + if (this.lookahead.type === 3 /* Identifier */) { + var keyToken = this.lookahead; + key = this.parseVariableIdentifier(); + var init = this.finalize(node, new Node.Identifier(keyToken.value)); + if (this.match('=')) { + params.push(keyToken); + shorthand = true; + this.nextToken(); + var expr = this.parseAssignmentExpression(); + value = this.finalize(this.startNode(keyToken), new Node.AssignmentPattern(init, expr)); + } + else if (!this.match(':')) { + params.push(keyToken); + shorthand = true; + value = init; + } + else { + this.expect(':'); + value = this.parsePatternWithDefault(params, kind); + } + } + else { + computed = this.match('['); + key = this.parseObjectPropertyKey(); + this.expect(':'); + value = this.parsePatternWithDefault(params, kind); + } + return this.finalize(node, new Node.Property('init', key, computed, value, method, shorthand)); + }; + Parser.prototype.parseObjectPattern = function (params, kind) { + var node = this.createNode(); + var properties = []; + this.expect('{'); + while (!this.match('}')) { + properties.push(this.parsePropertyPattern(params, kind)); + if (!this.match('}')) { + this.expect(','); + } + } + this.expect('}'); + return this.finalize(node, new Node.ObjectPattern(properties)); + }; + Parser.prototype.parsePattern = function (params, kind) { + var pattern; + if (this.match('[')) { + pattern = this.parseArrayPattern(params, kind); + } + else if (this.match('{')) { + pattern = this.parseObjectPattern(params, kind); + } + else { + if (this.matchKeyword('let') && (kind === 'const' || kind === 'let')) { + this.tolerateUnexpectedToken(this.lookahead, messages_1.Messages.LetInLexicalBinding); + } + params.push(this.lookahead); + pattern = this.parseVariableIdentifier(kind); + } + return pattern; + }; + Parser.prototype.parsePatternWithDefault = function (params, kind) { + var startToken = this.lookahead; + var pattern = this.parsePattern(params, kind); + if (this.match('=')) { + this.nextToken(); + var previousAllowYield = this.context.allowYield; + this.context.allowYield = true; + var right = this.isolateCoverGrammar(this.parseAssignmentExpression); + this.context.allowYield = previousAllowYield; + pattern = this.finalize(this.startNode(startToken), new Node.AssignmentPattern(pattern, right)); + } + return pattern; + }; + // https://tc39.github.io/ecma262/#sec-variable-statement + Parser.prototype.parseVariableIdentifier = function (kind) { + var node = this.createNode(); + var token = this.nextToken(); + if (token.type === 4 /* Keyword */ && token.value === 'yield') { + if (this.context.strict) { + this.tolerateUnexpectedToken(token, messages_1.Messages.StrictReservedWord); + } + else if (!this.context.allowYield) { + this.throwUnexpectedToken(token); + } + } + else if (token.type !== 3 /* Identifier */) { + if (this.context.strict && token.type === 4 /* Keyword */ && this.scanner.isStrictModeReservedWord(token.value)) { + this.tolerateUnexpectedToken(token, messages_1.Messages.StrictReservedWord); + } + else { + if (this.context.strict || token.value !== 'let' || kind !== 'var') { + this.throwUnexpectedToken(token); + } + } + } + else if ((this.context.isModule || this.context.await) && token.type === 3 /* Identifier */ && token.value === 'await') { + this.tolerateUnexpectedToken(token); + } + return this.finalize(node, new 
Node.Identifier(token.value)); + }; + Parser.prototype.parseVariableDeclaration = function (options) { + var node = this.createNode(); + var params = []; + var id = this.parsePattern(params, 'var'); + if (this.context.strict && id.type === syntax_1.Syntax.Identifier) { + if (this.scanner.isRestrictedWord(id.name)) { + this.tolerateError(messages_1.Messages.StrictVarName); + } + } + var init = null; + if (this.match('=')) { + this.nextToken(); + init = this.isolateCoverGrammar(this.parseAssignmentExpression); + } + else if (id.type !== syntax_1.Syntax.Identifier && !options.inFor) { + this.expect('='); + } + return this.finalize(node, new Node.VariableDeclarator(id, init)); + }; + Parser.prototype.parseVariableDeclarationList = function (options) { + var opt = { inFor: options.inFor }; + var list = []; + list.push(this.parseVariableDeclaration(opt)); + while (this.match(',')) { + this.nextToken(); + list.push(this.parseVariableDeclaration(opt)); + } + return list; + }; + Parser.prototype.parseVariableStatement = function () { + var node = this.createNode(); + this.expectKeyword('var'); + var declarations = this.parseVariableDeclarationList({ inFor: false }); + this.consumeSemicolon(); + return this.finalize(node, new Node.VariableDeclaration(declarations, 'var')); + }; + // https://tc39.github.io/ecma262/#sec-empty-statement + Parser.prototype.parseEmptyStatement = function () { + var node = this.createNode(); + this.expect(';'); + return this.finalize(node, new Node.EmptyStatement()); + }; + // https://tc39.github.io/ecma262/#sec-expression-statement + Parser.prototype.parseExpressionStatement = function () { + var node = this.createNode(); + var expr = this.parseExpression(); + this.consumeSemicolon(); + return this.finalize(node, new Node.ExpressionStatement(expr)); + }; + // https://tc39.github.io/ecma262/#sec-if-statement + Parser.prototype.parseIfClause = function () { + if (this.context.strict && this.matchKeyword('function')) { + this.tolerateError(messages_1.Messages.StrictFunction); + } + return this.parseStatement(); + }; + Parser.prototype.parseIfStatement = function () { + var node = this.createNode(); + var consequent; + var alternate = null; + this.expectKeyword('if'); + this.expect('('); + var test = this.parseExpression(); + if (!this.match(')') && this.config.tolerant) { + this.tolerateUnexpectedToken(this.nextToken()); + consequent = this.finalize(this.createNode(), new Node.EmptyStatement()); + } + else { + this.expect(')'); + consequent = this.parseIfClause(); + if (this.matchKeyword('else')) { + this.nextToken(); + alternate = this.parseIfClause(); + } + } + return this.finalize(node, new Node.IfStatement(test, consequent, alternate)); + }; + // https://tc39.github.io/ecma262/#sec-do-while-statement + Parser.prototype.parseDoWhileStatement = function () { + var node = this.createNode(); + this.expectKeyword('do'); + var previousInIteration = this.context.inIteration; + this.context.inIteration = true; + var body = this.parseStatement(); + this.context.inIteration = previousInIteration; + this.expectKeyword('while'); + this.expect('('); + var test = this.parseExpression(); + if (!this.match(')') && this.config.tolerant) { + this.tolerateUnexpectedToken(this.nextToken()); + } + else { + this.expect(')'); + if (this.match(';')) { + this.nextToken(); + } + } + return this.finalize(node, new Node.DoWhileStatement(body, test)); + }; + // https://tc39.github.io/ecma262/#sec-while-statement + Parser.prototype.parseWhileStatement = function () { + var node = this.createNode(); 
+ var body; + this.expectKeyword('while'); + this.expect('('); + var test = this.parseExpression(); + if (!this.match(')') && this.config.tolerant) { + this.tolerateUnexpectedToken(this.nextToken()); + body = this.finalize(this.createNode(), new Node.EmptyStatement()); + } + else { + this.expect(')'); + var previousInIteration = this.context.inIteration; + this.context.inIteration = true; + body = this.parseStatement(); + this.context.inIteration = previousInIteration; + } + return this.finalize(node, new Node.WhileStatement(test, body)); + }; + // https://tc39.github.io/ecma262/#sec-for-statement + // https://tc39.github.io/ecma262/#sec-for-in-and-for-of-statements + Parser.prototype.parseForStatement = function () { + var init = null; + var test = null; + var update = null; + var forIn = true; + var left, right; + var node = this.createNode(); + this.expectKeyword('for'); + this.expect('('); + if (this.match(';')) { + this.nextToken(); + } + else { + if (this.matchKeyword('var')) { + init = this.createNode(); + this.nextToken(); + var previousAllowIn = this.context.allowIn; + this.context.allowIn = false; + var declarations = this.parseVariableDeclarationList({ inFor: true }); + this.context.allowIn = previousAllowIn; + if (declarations.length === 1 && this.matchKeyword('in')) { + var decl = declarations[0]; + if (decl.init && (decl.id.type === syntax_1.Syntax.ArrayPattern || decl.id.type === syntax_1.Syntax.ObjectPattern || this.context.strict)) { + this.tolerateError(messages_1.Messages.ForInOfLoopInitializer, 'for-in'); + } + init = this.finalize(init, new Node.VariableDeclaration(declarations, 'var')); + this.nextToken(); + left = init; + right = this.parseExpression(); + init = null; + } + else if (declarations.length === 1 && declarations[0].init === null && this.matchContextualKeyword('of')) { + init = this.finalize(init, new Node.VariableDeclaration(declarations, 'var')); + this.nextToken(); + left = init; + right = this.parseAssignmentExpression(); + init = null; + forIn = false; + } + else { + init = this.finalize(init, new Node.VariableDeclaration(declarations, 'var')); + this.expect(';'); + } + } + else if (this.matchKeyword('const') || this.matchKeyword('let')) { + init = this.createNode(); + var kind = this.nextToken().value; + if (!this.context.strict && this.lookahead.value === 'in') { + init = this.finalize(init, new Node.Identifier(kind)); + this.nextToken(); + left = init; + right = this.parseExpression(); + init = null; + } + else { + var previousAllowIn = this.context.allowIn; + this.context.allowIn = false; + var declarations = this.parseBindingList(kind, { inFor: true }); + this.context.allowIn = previousAllowIn; + if (declarations.length === 1 && declarations[0].init === null && this.matchKeyword('in')) { + init = this.finalize(init, new Node.VariableDeclaration(declarations, kind)); + this.nextToken(); + left = init; + right = this.parseExpression(); + init = null; + } + else if (declarations.length === 1 && declarations[0].init === null && this.matchContextualKeyword('of')) { + init = this.finalize(init, new Node.VariableDeclaration(declarations, kind)); + this.nextToken(); + left = init; + right = this.parseAssignmentExpression(); + init = null; + forIn = false; + } + else { + this.consumeSemicolon(); + init = this.finalize(init, new Node.VariableDeclaration(declarations, kind)); + } + } + } + else { + var initStartToken = this.lookahead; + var previousAllowIn = this.context.allowIn; + this.context.allowIn = false; + init = 
this.inheritCoverGrammar(this.parseAssignmentExpression); + this.context.allowIn = previousAllowIn; + if (this.matchKeyword('in')) { + if (!this.context.isAssignmentTarget || init.type === syntax_1.Syntax.AssignmentExpression) { + this.tolerateError(messages_1.Messages.InvalidLHSInForIn); + } + this.nextToken(); + this.reinterpretExpressionAsPattern(init); + left = init; + right = this.parseExpression(); + init = null; + } + else if (this.matchContextualKeyword('of')) { + if (!this.context.isAssignmentTarget || init.type === syntax_1.Syntax.AssignmentExpression) { + this.tolerateError(messages_1.Messages.InvalidLHSInForLoop); + } + this.nextToken(); + this.reinterpretExpressionAsPattern(init); + left = init; + right = this.parseAssignmentExpression(); + init = null; + forIn = false; + } + else { + if (this.match(',')) { + var initSeq = [init]; + while (this.match(',')) { + this.nextToken(); + initSeq.push(this.isolateCoverGrammar(this.parseAssignmentExpression)); + } + init = this.finalize(this.startNode(initStartToken), new Node.SequenceExpression(initSeq)); + } + this.expect(';'); + } + } + } + if (typeof left === 'undefined') { + if (!this.match(';')) { + test = this.parseExpression(); + } + this.expect(';'); + if (!this.match(')')) { + update = this.parseExpression(); + } + } + var body; + if (!this.match(')') && this.config.tolerant) { + this.tolerateUnexpectedToken(this.nextToken()); + body = this.finalize(this.createNode(), new Node.EmptyStatement()); + } + else { + this.expect(')'); + var previousInIteration = this.context.inIteration; + this.context.inIteration = true; + body = this.isolateCoverGrammar(this.parseStatement); + this.context.inIteration = previousInIteration; + } + return (typeof left === 'undefined') ? + this.finalize(node, new Node.ForStatement(init, test, update, body)) : + forIn ? 
this.finalize(node, new Node.ForInStatement(left, right, body)) : + this.finalize(node, new Node.ForOfStatement(left, right, body)); + }; + // https://tc39.github.io/ecma262/#sec-continue-statement + Parser.prototype.parseContinueStatement = function () { + var node = this.createNode(); + this.expectKeyword('continue'); + var label = null; + if (this.lookahead.type === 3 /* Identifier */ && !this.hasLineTerminator) { + var id = this.parseVariableIdentifier(); + label = id; + var key = '$' + id.name; + if (!Object.prototype.hasOwnProperty.call(this.context.labelSet, key)) { + this.throwError(messages_1.Messages.UnknownLabel, id.name); + } + } + this.consumeSemicolon(); + if (label === null && !this.context.inIteration) { + this.throwError(messages_1.Messages.IllegalContinue); + } + return this.finalize(node, new Node.ContinueStatement(label)); + }; + // https://tc39.github.io/ecma262/#sec-break-statement + Parser.prototype.parseBreakStatement = function () { + var node = this.createNode(); + this.expectKeyword('break'); + var label = null; + if (this.lookahead.type === 3 /* Identifier */ && !this.hasLineTerminator) { + var id = this.parseVariableIdentifier(); + var key = '$' + id.name; + if (!Object.prototype.hasOwnProperty.call(this.context.labelSet, key)) { + this.throwError(messages_1.Messages.UnknownLabel, id.name); + } + label = id; + } + this.consumeSemicolon(); + if (label === null && !this.context.inIteration && !this.context.inSwitch) { + this.throwError(messages_1.Messages.IllegalBreak); + } + return this.finalize(node, new Node.BreakStatement(label)); + }; + // https://tc39.github.io/ecma262/#sec-return-statement + Parser.prototype.parseReturnStatement = function () { + if (!this.context.inFunctionBody) { + this.tolerateError(messages_1.Messages.IllegalReturn); + } + var node = this.createNode(); + this.expectKeyword('return'); + var hasArgument = (!this.match(';') && !this.match('}') && + !this.hasLineTerminator && this.lookahead.type !== 2 /* EOF */) || + this.lookahead.type === 8 /* StringLiteral */ || + this.lookahead.type === 10 /* Template */; + var argument = hasArgument ? 
this.parseExpression() : null; + this.consumeSemicolon(); + return this.finalize(node, new Node.ReturnStatement(argument)); + }; + // https://tc39.github.io/ecma262/#sec-with-statement + Parser.prototype.parseWithStatement = function () { + if (this.context.strict) { + this.tolerateError(messages_1.Messages.StrictModeWith); + } + var node = this.createNode(); + var body; + this.expectKeyword('with'); + this.expect('('); + var object = this.parseExpression(); + if (!this.match(')') && this.config.tolerant) { + this.tolerateUnexpectedToken(this.nextToken()); + body = this.finalize(this.createNode(), new Node.EmptyStatement()); + } + else { + this.expect(')'); + body = this.parseStatement(); + } + return this.finalize(node, new Node.WithStatement(object, body)); + }; + // https://tc39.github.io/ecma262/#sec-switch-statement + Parser.prototype.parseSwitchCase = function () { + var node = this.createNode(); + var test; + if (this.matchKeyword('default')) { + this.nextToken(); + test = null; + } + else { + this.expectKeyword('case'); + test = this.parseExpression(); + } + this.expect(':'); + var consequent = []; + while (true) { + if (this.match('}') || this.matchKeyword('default') || this.matchKeyword('case')) { + break; + } + consequent.push(this.parseStatementListItem()); + } + return this.finalize(node, new Node.SwitchCase(test, consequent)); + }; + Parser.prototype.parseSwitchStatement = function () { + var node = this.createNode(); + this.expectKeyword('switch'); + this.expect('('); + var discriminant = this.parseExpression(); + this.expect(')'); + var previousInSwitch = this.context.inSwitch; + this.context.inSwitch = true; + var cases = []; + var defaultFound = false; + this.expect('{'); + while (true) { + if (this.match('}')) { + break; + } + var clause = this.parseSwitchCase(); + if (clause.test === null) { + if (defaultFound) { + this.throwError(messages_1.Messages.MultipleDefaultsInSwitch); + } + defaultFound = true; + } + cases.push(clause); + } + this.expect('}'); + this.context.inSwitch = previousInSwitch; + return this.finalize(node, new Node.SwitchStatement(discriminant, cases)); + }; + // https://tc39.github.io/ecma262/#sec-labelled-statements + Parser.prototype.parseLabelledStatement = function () { + var node = this.createNode(); + var expr = this.parseExpression(); + var statement; + if ((expr.type === syntax_1.Syntax.Identifier) && this.match(':')) { + this.nextToken(); + var id = expr; + var key = '$' + id.name; + if (Object.prototype.hasOwnProperty.call(this.context.labelSet, key)) { + this.throwError(messages_1.Messages.Redeclaration, 'Label', id.name); + } + this.context.labelSet[key] = true; + var body = void 0; + if (this.matchKeyword('class')) { + this.tolerateUnexpectedToken(this.lookahead); + body = this.parseClassDeclaration(); + } + else if (this.matchKeyword('function')) { + var token = this.lookahead; + var declaration = this.parseFunctionDeclaration(); + if (this.context.strict) { + this.tolerateUnexpectedToken(token, messages_1.Messages.StrictFunction); + } + else if (declaration.generator) { + this.tolerateUnexpectedToken(token, messages_1.Messages.GeneratorInLegacyContext); + } + body = declaration; + } + else { + body = this.parseStatement(); + } + delete this.context.labelSet[key]; + statement = new Node.LabeledStatement(id, body); + } + else { + this.consumeSemicolon(); + statement = new Node.ExpressionStatement(expr); + } + return this.finalize(node, statement); + }; + // https://tc39.github.io/ecma262/#sec-throw-statement + 
Parser.prototype.parseThrowStatement = function () { + var node = this.createNode(); + this.expectKeyword('throw'); + if (this.hasLineTerminator) { + this.throwError(messages_1.Messages.NewlineAfterThrow); + } + var argument = this.parseExpression(); + this.consumeSemicolon(); + return this.finalize(node, new Node.ThrowStatement(argument)); + }; + // https://tc39.github.io/ecma262/#sec-try-statement + Parser.prototype.parseCatchClause = function () { + var node = this.createNode(); + this.expectKeyword('catch'); + this.expect('('); + if (this.match(')')) { + this.throwUnexpectedToken(this.lookahead); + } + var params = []; + var param = this.parsePattern(params); + var paramMap = {}; + for (var i = 0; i < params.length; i++) { + var key = '$' + params[i].value; + if (Object.prototype.hasOwnProperty.call(paramMap, key)) { + this.tolerateError(messages_1.Messages.DuplicateBinding, params[i].value); + } + paramMap[key] = true; + } + if (this.context.strict && param.type === syntax_1.Syntax.Identifier) { + if (this.scanner.isRestrictedWord(param.name)) { + this.tolerateError(messages_1.Messages.StrictCatchVariable); + } + } + this.expect(')'); + var body = this.parseBlock(); + return this.finalize(node, new Node.CatchClause(param, body)); + }; + Parser.prototype.parseFinallyClause = function () { + this.expectKeyword('finally'); + return this.parseBlock(); + }; + Parser.prototype.parseTryStatement = function () { + var node = this.createNode(); + this.expectKeyword('try'); + var block = this.parseBlock(); + var handler = this.matchKeyword('catch') ? this.parseCatchClause() : null; + var finalizer = this.matchKeyword('finally') ? this.parseFinallyClause() : null; + if (!handler && !finalizer) { + this.throwError(messages_1.Messages.NoCatchOrFinally); + } + return this.finalize(node, new Node.TryStatement(block, handler, finalizer)); + }; + // https://tc39.github.io/ecma262/#sec-debugger-statement + Parser.prototype.parseDebuggerStatement = function () { + var node = this.createNode(); + this.expectKeyword('debugger'); + this.consumeSemicolon(); + return this.finalize(node, new Node.DebuggerStatement()); + }; + // https://tc39.github.io/ecma262/#sec-ecmascript-language-statements-and-declarations + Parser.prototype.parseStatement = function () { + var statement; + switch (this.lookahead.type) { + case 1 /* BooleanLiteral */: + case 5 /* NullLiteral */: + case 6 /* NumericLiteral */: + case 8 /* StringLiteral */: + case 10 /* Template */: + case 9 /* RegularExpression */: + statement = this.parseExpressionStatement(); + break; + case 7 /* Punctuator */: + var value = this.lookahead.value; + if (value === '{') { + statement = this.parseBlock(); + } + else if (value === '(') { + statement = this.parseExpressionStatement(); + } + else if (value === ';') { + statement = this.parseEmptyStatement(); + } + else { + statement = this.parseExpressionStatement(); + } + break; + case 3 /* Identifier */: + statement = this.matchAsyncFunction() ? 
this.parseFunctionDeclaration() : this.parseLabelledStatement(); + break; + case 4 /* Keyword */: + switch (this.lookahead.value) { + case 'break': + statement = this.parseBreakStatement(); + break; + case 'continue': + statement = this.parseContinueStatement(); + break; + case 'debugger': + statement = this.parseDebuggerStatement(); + break; + case 'do': + statement = this.parseDoWhileStatement(); + break; + case 'for': + statement = this.parseForStatement(); + break; + case 'function': + statement = this.parseFunctionDeclaration(); + break; + case 'if': + statement = this.parseIfStatement(); + break; + case 'return': + statement = this.parseReturnStatement(); + break; + case 'switch': + statement = this.parseSwitchStatement(); + break; + case 'throw': + statement = this.parseThrowStatement(); + break; + case 'try': + statement = this.parseTryStatement(); + break; + case 'var': + statement = this.parseVariableStatement(); + break; + case 'while': + statement = this.parseWhileStatement(); + break; + case 'with': + statement = this.parseWithStatement(); + break; + default: + statement = this.parseExpressionStatement(); + break; + } + break; + default: + statement = this.throwUnexpectedToken(this.lookahead); + } + return statement; + }; + // https://tc39.github.io/ecma262/#sec-function-definitions + Parser.prototype.parseFunctionSourceElements = function () { + var node = this.createNode(); + this.expect('{'); + var body = this.parseDirectivePrologues(); + var previousLabelSet = this.context.labelSet; + var previousInIteration = this.context.inIteration; + var previousInSwitch = this.context.inSwitch; + var previousInFunctionBody = this.context.inFunctionBody; + this.context.labelSet = {}; + this.context.inIteration = false; + this.context.inSwitch = false; + this.context.inFunctionBody = true; + while (this.lookahead.type !== 2 /* EOF */) { + if (this.match('}')) { + break; + } + body.push(this.parseStatementListItem()); + } + this.expect('}'); + this.context.labelSet = previousLabelSet; + this.context.inIteration = previousInIteration; + this.context.inSwitch = previousInSwitch; + this.context.inFunctionBody = previousInFunctionBody; + return this.finalize(node, new Node.BlockStatement(body)); + }; + Parser.prototype.validateParam = function (options, param, name) { + var key = '$' + name; + if (this.context.strict) { + if (this.scanner.isRestrictedWord(name)) { + options.stricted = param; + options.message = messages_1.Messages.StrictParamName; + } + if (Object.prototype.hasOwnProperty.call(options.paramSet, key)) { + options.stricted = param; + options.message = messages_1.Messages.StrictParamDupe; + } + } + else if (!options.firstRestricted) { + if (this.scanner.isRestrictedWord(name)) { + options.firstRestricted = param; + options.message = messages_1.Messages.StrictParamName; + } + else if (this.scanner.isStrictModeReservedWord(name)) { + options.firstRestricted = param; + options.message = messages_1.Messages.StrictReservedWord; + } + else if (Object.prototype.hasOwnProperty.call(options.paramSet, key)) { + options.stricted = param; + options.message = messages_1.Messages.StrictParamDupe; + } + } + /* istanbul ignore next */ + if (typeof Object.defineProperty === 'function') { + Object.defineProperty(options.paramSet, key, { value: true, enumerable: true, writable: true, configurable: true }); + } + else { + options.paramSet[key] = true; + } + }; + Parser.prototype.parseRestElement = function (params) { + var node = this.createNode(); + this.expect('...'); + var arg = 
this.parsePattern(params); + if (this.match('=')) { + this.throwError(messages_1.Messages.DefaultRestParameter); + } + if (!this.match(')')) { + this.throwError(messages_1.Messages.ParameterAfterRestParameter); + } + return this.finalize(node, new Node.RestElement(arg)); + }; + Parser.prototype.parseFormalParameter = function (options) { + var params = []; + var param = this.match('...') ? this.parseRestElement(params) : this.parsePatternWithDefault(params); + for (var i = 0; i < params.length; i++) { + this.validateParam(options, params[i], params[i].value); + } + options.simple = options.simple && (param instanceof Node.Identifier); + options.params.push(param); + }; + Parser.prototype.parseFormalParameters = function (firstRestricted) { + var options; + options = { + simple: true, + params: [], + firstRestricted: firstRestricted + }; + this.expect('('); + if (!this.match(')')) { + options.paramSet = {}; + while (this.lookahead.type !== 2 /* EOF */) { + this.parseFormalParameter(options); + if (this.match(')')) { + break; + } + this.expect(','); + if (this.match(')')) { + break; + } + } + } + this.expect(')'); + return { + simple: options.simple, + params: options.params, + stricted: options.stricted, + firstRestricted: options.firstRestricted, + message: options.message + }; + }; + Parser.prototype.matchAsyncFunction = function () { + var match = this.matchContextualKeyword('async'); + if (match) { + var state = this.scanner.saveState(); + this.scanner.scanComments(); + var next = this.scanner.lex(); + this.scanner.restoreState(state); + match = (state.lineNumber === next.lineNumber) && (next.type === 4 /* Keyword */) && (next.value === 'function'); + } + return match; + }; + Parser.prototype.parseFunctionDeclaration = function (identifierIsOptional) { + var node = this.createNode(); + var isAsync = this.matchContextualKeyword('async'); + if (isAsync) { + this.nextToken(); + } + this.expectKeyword('function'); + var isGenerator = isAsync ? 
false : this.match('*'); + if (isGenerator) { + this.nextToken(); + } + var message; + var id = null; + var firstRestricted = null; + if (!identifierIsOptional || !this.match('(')) { + var token = this.lookahead; + id = this.parseVariableIdentifier(); + if (this.context.strict) { + if (this.scanner.isRestrictedWord(token.value)) { + this.tolerateUnexpectedToken(token, messages_1.Messages.StrictFunctionName); + } + } + else { + if (this.scanner.isRestrictedWord(token.value)) { + firstRestricted = token; + message = messages_1.Messages.StrictFunctionName; + } + else if (this.scanner.isStrictModeReservedWord(token.value)) { + firstRestricted = token; + message = messages_1.Messages.StrictReservedWord; + } + } + } + var previousAllowAwait = this.context.await; + var previousAllowYield = this.context.allowYield; + this.context.await = isAsync; + this.context.allowYield = !isGenerator; + var formalParameters = this.parseFormalParameters(firstRestricted); + var params = formalParameters.params; + var stricted = formalParameters.stricted; + firstRestricted = formalParameters.firstRestricted; + if (formalParameters.message) { + message = formalParameters.message; + } + var previousStrict = this.context.strict; + var previousAllowStrictDirective = this.context.allowStrictDirective; + this.context.allowStrictDirective = formalParameters.simple; + var body = this.parseFunctionSourceElements(); + if (this.context.strict && firstRestricted) { + this.throwUnexpectedToken(firstRestricted, message); + } + if (this.context.strict && stricted) { + this.tolerateUnexpectedToken(stricted, message); + } + this.context.strict = previousStrict; + this.context.allowStrictDirective = previousAllowStrictDirective; + this.context.await = previousAllowAwait; + this.context.allowYield = previousAllowYield; + return isAsync ? this.finalize(node, new Node.AsyncFunctionDeclaration(id, params, body)) : + this.finalize(node, new Node.FunctionDeclaration(id, params, body, isGenerator)); + }; + Parser.prototype.parseFunctionExpression = function () { + var node = this.createNode(); + var isAsync = this.matchContextualKeyword('async'); + if (isAsync) { + this.nextToken(); + } + this.expectKeyword('function'); + var isGenerator = isAsync ? false : this.match('*'); + if (isGenerator) { + this.nextToken(); + } + var message; + var id = null; + var firstRestricted; + var previousAllowAwait = this.context.await; + var previousAllowYield = this.context.allowYield; + this.context.await = isAsync; + this.context.allowYield = !isGenerator; + if (!this.match('(')) { + var token = this.lookahead; + id = (!this.context.strict && !isGenerator && this.matchKeyword('yield')) ? 
this.parseIdentifierName() : this.parseVariableIdentifier(); + if (this.context.strict) { + if (this.scanner.isRestrictedWord(token.value)) { + this.tolerateUnexpectedToken(token, messages_1.Messages.StrictFunctionName); + } + } + else { + if (this.scanner.isRestrictedWord(token.value)) { + firstRestricted = token; + message = messages_1.Messages.StrictFunctionName; + } + else if (this.scanner.isStrictModeReservedWord(token.value)) { + firstRestricted = token; + message = messages_1.Messages.StrictReservedWord; + } + } + } + var formalParameters = this.parseFormalParameters(firstRestricted); + var params = formalParameters.params; + var stricted = formalParameters.stricted; + firstRestricted = formalParameters.firstRestricted; + if (formalParameters.message) { + message = formalParameters.message; + } + var previousStrict = this.context.strict; + var previousAllowStrictDirective = this.context.allowStrictDirective; + this.context.allowStrictDirective = formalParameters.simple; + var body = this.parseFunctionSourceElements(); + if (this.context.strict && firstRestricted) { + this.throwUnexpectedToken(firstRestricted, message); + } + if (this.context.strict && stricted) { + this.tolerateUnexpectedToken(stricted, message); + } + this.context.strict = previousStrict; + this.context.allowStrictDirective = previousAllowStrictDirective; + this.context.await = previousAllowAwait; + this.context.allowYield = previousAllowYield; + return isAsync ? this.finalize(node, new Node.AsyncFunctionExpression(id, params, body)) : + this.finalize(node, new Node.FunctionExpression(id, params, body, isGenerator)); + }; + // https://tc39.github.io/ecma262/#sec-directive-prologues-and-the-use-strict-directive + Parser.prototype.parseDirective = function () { + var token = this.lookahead; + var node = this.createNode(); + var expr = this.parseExpression(); + var directive = (expr.type === syntax_1.Syntax.Literal) ? this.getTokenRaw(token).slice(1, -1) : null; + this.consumeSemicolon(); + return this.finalize(node, directive ? 
new Node.Directive(expr, directive) : new Node.ExpressionStatement(expr)); + }; + Parser.prototype.parseDirectivePrologues = function () { + var firstRestricted = null; + var body = []; + while (true) { + var token = this.lookahead; + if (token.type !== 8 /* StringLiteral */) { + break; + } + var statement = this.parseDirective(); + body.push(statement); + var directive = statement.directive; + if (typeof directive !== 'string') { + break; + } + if (directive === 'use strict') { + this.context.strict = true; + if (firstRestricted) { + this.tolerateUnexpectedToken(firstRestricted, messages_1.Messages.StrictOctalLiteral); + } + if (!this.context.allowStrictDirective) { + this.tolerateUnexpectedToken(token, messages_1.Messages.IllegalLanguageModeDirective); + } + } + else { + if (!firstRestricted && token.octal) { + firstRestricted = token; + } + } + } + return body; + }; + // https://tc39.github.io/ecma262/#sec-method-definitions + Parser.prototype.qualifiedPropertyName = function (token) { + switch (token.type) { + case 3 /* Identifier */: + case 8 /* StringLiteral */: + case 1 /* BooleanLiteral */: + case 5 /* NullLiteral */: + case 6 /* NumericLiteral */: + case 4 /* Keyword */: + return true; + case 7 /* Punctuator */: + return token.value === '['; + default: + break; + } + return false; + }; + Parser.prototype.parseGetterMethod = function () { + var node = this.createNode(); + var isGenerator = false; + var previousAllowYield = this.context.allowYield; + this.context.allowYield = !isGenerator; + var formalParameters = this.parseFormalParameters(); + if (formalParameters.params.length > 0) { + this.tolerateError(messages_1.Messages.BadGetterArity); + } + var method = this.parsePropertyMethod(formalParameters); + this.context.allowYield = previousAllowYield; + return this.finalize(node, new Node.FunctionExpression(null, formalParameters.params, method, isGenerator)); + }; + Parser.prototype.parseSetterMethod = function () { + var node = this.createNode(); + var isGenerator = false; + var previousAllowYield = this.context.allowYield; + this.context.allowYield = !isGenerator; + var formalParameters = this.parseFormalParameters(); + if (formalParameters.params.length !== 1) { + this.tolerateError(messages_1.Messages.BadSetterArity); + } + else if (formalParameters.params[0] instanceof Node.RestElement) { + this.tolerateError(messages_1.Messages.BadSetterRestParameter); + } + var method = this.parsePropertyMethod(formalParameters); + this.context.allowYield = previousAllowYield; + return this.finalize(node, new Node.FunctionExpression(null, formalParameters.params, method, isGenerator)); + }; + Parser.prototype.parseGeneratorMethod = function () { + var node = this.createNode(); + var isGenerator = true; + var previousAllowYield = this.context.allowYield; + this.context.allowYield = true; + var params = this.parseFormalParameters(); + this.context.allowYield = false; + var method = this.parsePropertyMethod(params); + this.context.allowYield = previousAllowYield; + return this.finalize(node, new Node.FunctionExpression(null, params.params, method, isGenerator)); + }; + // https://tc39.github.io/ecma262/#sec-generator-function-definitions + Parser.prototype.isStartOfExpression = function () { + var start = true; + var value = this.lookahead.value; + switch (this.lookahead.type) { + case 7 /* Punctuator */: + start = (value === '[') || (value === '(') || (value === '{') || + (value === '+') || (value === '-') || + (value === '!') || (value === '~') || + (value === '++') || (value === '--') || + 
(value === '/') || (value === '/='); // regular expression literal + break; + case 4 /* Keyword */: + start = (value === 'class') || (value === 'delete') || + (value === 'function') || (value === 'let') || (value === 'new') || + (value === 'super') || (value === 'this') || (value === 'typeof') || + (value === 'void') || (value === 'yield'); + break; + default: + break; + } + return start; + }; + Parser.prototype.parseYieldExpression = function () { + var node = this.createNode(); + this.expectKeyword('yield'); + var argument = null; + var delegate = false; + if (!this.hasLineTerminator) { + var previousAllowYield = this.context.allowYield; + this.context.allowYield = false; + delegate = this.match('*'); + if (delegate) { + this.nextToken(); + argument = this.parseAssignmentExpression(); + } + else if (this.isStartOfExpression()) { + argument = this.parseAssignmentExpression(); + } + this.context.allowYield = previousAllowYield; + } + return this.finalize(node, new Node.YieldExpression(argument, delegate)); + }; + // https://tc39.github.io/ecma262/#sec-class-definitions + Parser.prototype.parseClassElement = function (hasConstructor) { + var token = this.lookahead; + var node = this.createNode(); + var kind = ''; + var key = null; + var value = null; + var computed = false; + var method = false; + var isStatic = false; + var isAsync = false; + if (this.match('*')) { + this.nextToken(); + } + else { + computed = this.match('['); + key = this.parseObjectPropertyKey(); + var id = key; + if (id.name === 'static' && (this.qualifiedPropertyName(this.lookahead) || this.match('*'))) { + token = this.lookahead; + isStatic = true; + computed = this.match('['); + if (this.match('*')) { + this.nextToken(); + } + else { + key = this.parseObjectPropertyKey(); + } + } + if ((token.type === 3 /* Identifier */) && !this.hasLineTerminator && (token.value === 'async')) { + var punctuator = this.lookahead.value; + if (punctuator !== ':' && punctuator !== '(' && punctuator !== '*') { + isAsync = true; + token = this.lookahead; + key = this.parseObjectPropertyKey(); + if (token.type === 3 /* Identifier */ && token.value === 'constructor') { + this.tolerateUnexpectedToken(token, messages_1.Messages.ConstructorIsAsync); + } + } + } + } + var lookaheadPropertyKey = this.qualifiedPropertyName(this.lookahead); + if (token.type === 3 /* Identifier */) { + if (token.value === 'get' && lookaheadPropertyKey) { + kind = 'get'; + computed = this.match('['); + key = this.parseObjectPropertyKey(); + this.context.allowYield = false; + value = this.parseGetterMethod(); + } + else if (token.value === 'set' && lookaheadPropertyKey) { + kind = 'set'; + computed = this.match('['); + key = this.parseObjectPropertyKey(); + value = this.parseSetterMethod(); + } + } + else if (token.type === 7 /* Punctuator */ && token.value === '*' && lookaheadPropertyKey) { + kind = 'init'; + computed = this.match('['); + key = this.parseObjectPropertyKey(); + value = this.parseGeneratorMethod(); + method = true; + } + if (!kind && key && this.match('(')) { + kind = 'init'; + value = isAsync ? 
this.parsePropertyMethodAsyncFunction() : this.parsePropertyMethodFunction(); + method = true; + } + if (!kind) { + this.throwUnexpectedToken(this.lookahead); + } + if (kind === 'init') { + kind = 'method'; + } + if (!computed) { + if (isStatic && this.isPropertyKey(key, 'prototype')) { + this.throwUnexpectedToken(token, messages_1.Messages.StaticPrototype); + } + if (!isStatic && this.isPropertyKey(key, 'constructor')) { + if (kind !== 'method' || !method || (value && value.generator)) { + this.throwUnexpectedToken(token, messages_1.Messages.ConstructorSpecialMethod); + } + if (hasConstructor.value) { + this.throwUnexpectedToken(token, messages_1.Messages.DuplicateConstructor); + } + else { + hasConstructor.value = true; + } + kind = 'constructor'; + } + } + return this.finalize(node, new Node.MethodDefinition(key, computed, value, kind, isStatic)); + }; + Parser.prototype.parseClassElementList = function () { + var body = []; + var hasConstructor = { value: false }; + this.expect('{'); + while (!this.match('}')) { + if (this.match(';')) { + this.nextToken(); + } + else { + body.push(this.parseClassElement(hasConstructor)); + } + } + this.expect('}'); + return body; + }; + Parser.prototype.parseClassBody = function () { + var node = this.createNode(); + var elementList = this.parseClassElementList(); + return this.finalize(node, new Node.ClassBody(elementList)); + }; + Parser.prototype.parseClassDeclaration = function (identifierIsOptional) { + var node = this.createNode(); + var previousStrict = this.context.strict; + this.context.strict = true; + this.expectKeyword('class'); + var id = (identifierIsOptional && (this.lookahead.type !== 3 /* Identifier */)) ? null : this.parseVariableIdentifier(); + var superClass = null; + if (this.matchKeyword('extends')) { + this.nextToken(); + superClass = this.isolateCoverGrammar(this.parseLeftHandSideExpressionAllowCall); + } + var classBody = this.parseClassBody(); + this.context.strict = previousStrict; + return this.finalize(node, new Node.ClassDeclaration(id, superClass, classBody)); + }; + Parser.prototype.parseClassExpression = function () { + var node = this.createNode(); + var previousStrict = this.context.strict; + this.context.strict = true; + this.expectKeyword('class'); + var id = (this.lookahead.type === 3 /* Identifier */) ? 
this.parseVariableIdentifier() : null; + var superClass = null; + if (this.matchKeyword('extends')) { + this.nextToken(); + superClass = this.isolateCoverGrammar(this.parseLeftHandSideExpressionAllowCall); + } + var classBody = this.parseClassBody(); + this.context.strict = previousStrict; + return this.finalize(node, new Node.ClassExpression(id, superClass, classBody)); + }; + // https://tc39.github.io/ecma262/#sec-scripts + // https://tc39.github.io/ecma262/#sec-modules + Parser.prototype.parseModule = function () { + this.context.strict = true; + this.context.isModule = true; + this.scanner.isModule = true; + var node = this.createNode(); + var body = this.parseDirectivePrologues(); + while (this.lookahead.type !== 2 /* EOF */) { + body.push(this.parseStatementListItem()); + } + return this.finalize(node, new Node.Module(body)); + }; + Parser.prototype.parseScript = function () { + var node = this.createNode(); + var body = this.parseDirectivePrologues(); + while (this.lookahead.type !== 2 /* EOF */) { + body.push(this.parseStatementListItem()); + } + return this.finalize(node, new Node.Script(body)); + }; + // https://tc39.github.io/ecma262/#sec-imports + Parser.prototype.parseModuleSpecifier = function () { + var node = this.createNode(); + if (this.lookahead.type !== 8 /* StringLiteral */) { + this.throwError(messages_1.Messages.InvalidModuleSpecifier); + } + var token = this.nextToken(); + var raw = this.getTokenRaw(token); + return this.finalize(node, new Node.Literal(token.value, raw)); + }; + // import {} ...; + Parser.prototype.parseImportSpecifier = function () { + var node = this.createNode(); + var imported; + var local; + if (this.lookahead.type === 3 /* Identifier */) { + imported = this.parseVariableIdentifier(); + local = imported; + if (this.matchContextualKeyword('as')) { + this.nextToken(); + local = this.parseVariableIdentifier(); + } + } + else { + imported = this.parseIdentifierName(); + local = imported; + if (this.matchContextualKeyword('as')) { + this.nextToken(); + local = this.parseVariableIdentifier(); + } + else { + this.throwUnexpectedToken(this.nextToken()); + } + } + return this.finalize(node, new Node.ImportSpecifier(local, imported)); + }; + // {foo, bar as bas} + Parser.prototype.parseNamedImports = function () { + this.expect('{'); + var specifiers = []; + while (!this.match('}')) { + specifiers.push(this.parseImportSpecifier()); + if (!this.match('}')) { + this.expect(','); + } + } + this.expect('}'); + return specifiers; + }; + // import ...; + Parser.prototype.parseImportDefaultSpecifier = function () { + var node = this.createNode(); + var local = this.parseIdentifierName(); + return this.finalize(node, new Node.ImportDefaultSpecifier(local)); + }; + // import <* as foo> ...; + Parser.prototype.parseImportNamespaceSpecifier = function () { + var node = this.createNode(); + this.expect('*'); + if (!this.matchContextualKeyword('as')) { + this.throwError(messages_1.Messages.NoAsAfterImportNamespace); + } + this.nextToken(); + var local = this.parseIdentifierName(); + return this.finalize(node, new Node.ImportNamespaceSpecifier(local)); + }; + Parser.prototype.parseImportDeclaration = function () { + if (this.context.inFunctionBody) { + this.throwError(messages_1.Messages.IllegalImportDeclaration); + } + var node = this.createNode(); + this.expectKeyword('import'); + var src; + var specifiers = []; + if (this.lookahead.type === 8 /* StringLiteral */) { + // import 'foo'; + src = this.parseModuleSpecifier(); + } + else { + if (this.match('{')) { + // 
import {bar} + specifiers = specifiers.concat(this.parseNamedImports()); + } + else if (this.match('*')) { + // import * as foo + specifiers.push(this.parseImportNamespaceSpecifier()); + } + else if (this.isIdentifierName(this.lookahead) && !this.matchKeyword('default')) { + // import foo + specifiers.push(this.parseImportDefaultSpecifier()); + if (this.match(',')) { + this.nextToken(); + if (this.match('*')) { + // import foo, * as foo + specifiers.push(this.parseImportNamespaceSpecifier()); + } + else if (this.match('{')) { + // import foo, {bar} + specifiers = specifiers.concat(this.parseNamedImports()); + } + else { + this.throwUnexpectedToken(this.lookahead); + } + } + } + else { + this.throwUnexpectedToken(this.nextToken()); + } + if (!this.matchContextualKeyword('from')) { + var message = this.lookahead.value ? messages_1.Messages.UnexpectedToken : messages_1.Messages.MissingFromClause; + this.throwError(message, this.lookahead.value); + } + this.nextToken(); + src = this.parseModuleSpecifier(); + } + this.consumeSemicolon(); + return this.finalize(node, new Node.ImportDeclaration(specifiers, src)); + }; + // https://tc39.github.io/ecma262/#sec-exports + Parser.prototype.parseExportSpecifier = function () { + var node = this.createNode(); + var local = this.parseIdentifierName(); + var exported = local; + if (this.matchContextualKeyword('as')) { + this.nextToken(); + exported = this.parseIdentifierName(); + } + return this.finalize(node, new Node.ExportSpecifier(local, exported)); + }; + Parser.prototype.parseExportDeclaration = function () { + if (this.context.inFunctionBody) { + this.throwError(messages_1.Messages.IllegalExportDeclaration); + } + var node = this.createNode(); + this.expectKeyword('export'); + var exportDeclaration; + if (this.matchKeyword('default')) { + // export default ... + this.nextToken(); + if (this.matchKeyword('function')) { + // export default function foo () {} + // export default function () {} + var declaration = this.parseFunctionDeclaration(true); + exportDeclaration = this.finalize(node, new Node.ExportDefaultDeclaration(declaration)); + } + else if (this.matchKeyword('class')) { + // export default class foo {} + var declaration = this.parseClassDeclaration(true); + exportDeclaration = this.finalize(node, new Node.ExportDefaultDeclaration(declaration)); + } + else if (this.matchContextualKeyword('async')) { + // export default async function f () {} + // export default async function () {} + // export default async x => x + var declaration = this.matchAsyncFunction() ? this.parseFunctionDeclaration(true) : this.parseAssignmentExpression(); + exportDeclaration = this.finalize(node, new Node.ExportDefaultDeclaration(declaration)); + } + else { + if (this.matchContextualKeyword('from')) { + this.throwError(messages_1.Messages.UnexpectedToken, this.lookahead.value); + } + // export default {}; + // export default []; + // export default (1 + 2); + var declaration = this.match('{') ? this.parseObjectInitializer() : + this.match('[') ? this.parseArrayInitializer() : this.parseAssignmentExpression(); + this.consumeSemicolon(); + exportDeclaration = this.finalize(node, new Node.ExportDefaultDeclaration(declaration)); + } + } + else if (this.match('*')) { + // export * from 'foo'; + this.nextToken(); + if (!this.matchContextualKeyword('from')) { + var message = this.lookahead.value ? 
messages_1.Messages.UnexpectedToken : messages_1.Messages.MissingFromClause; + this.throwError(message, this.lookahead.value); + } + this.nextToken(); + var src = this.parseModuleSpecifier(); + this.consumeSemicolon(); + exportDeclaration = this.finalize(node, new Node.ExportAllDeclaration(src)); + } + else if (this.lookahead.type === 4 /* Keyword */) { + // export var f = 1; + var declaration = void 0; + switch (this.lookahead.value) { + case 'let': + case 'const': + declaration = this.parseLexicalDeclaration({ inFor: false }); + break; + case 'var': + case 'class': + case 'function': + declaration = this.parseStatementListItem(); + break; + default: + this.throwUnexpectedToken(this.lookahead); + } + exportDeclaration = this.finalize(node, new Node.ExportNamedDeclaration(declaration, [], null)); + } + else if (this.matchAsyncFunction()) { + var declaration = this.parseFunctionDeclaration(); + exportDeclaration = this.finalize(node, new Node.ExportNamedDeclaration(declaration, [], null)); + } + else { + var specifiers = []; + var source = null; + var isExportFromIdentifier = false; + this.expect('{'); + while (!this.match('}')) { + isExportFromIdentifier = isExportFromIdentifier || this.matchKeyword('default'); + specifiers.push(this.parseExportSpecifier()); + if (!this.match('}')) { + this.expect(','); + } + } + this.expect('}'); + if (this.matchContextualKeyword('from')) { + // export {default} from 'foo'; + // export {foo} from 'foo'; + this.nextToken(); + source = this.parseModuleSpecifier(); + this.consumeSemicolon(); + } + else if (isExportFromIdentifier) { + // export {default}; // missing fromClause + var message = this.lookahead.value ? messages_1.Messages.UnexpectedToken : messages_1.Messages.MissingFromClause; + this.throwError(message, this.lookahead.value); + } + else { + // export {foo}; + this.consumeSemicolon(); + } + exportDeclaration = this.finalize(node, new Node.ExportNamedDeclaration(null, specifiers, source)); + } + return exportDeclaration; + }; + return Parser; + }()); + exports.Parser = Parser; + + +/***/ }, +/* 9 */ +/***/ function(module, exports) { + + "use strict"; + // Ensure the condition is true, otherwise throw an error. + // This is only to have a better contract semantic, i.e. another safety net + // to catch a logic error. The condition shall be fulfilled in normal case. + // Do NOT use this to enforce a certain condition on any user input. 
+ Object.defineProperty(exports, "__esModule", { value: true }); + function assert(condition, message) { + /* istanbul ignore if */ + if (!condition) { + throw new Error('ASSERT: ' + message); + } + } + exports.assert = assert; + + +/***/ }, +/* 10 */ +/***/ function(module, exports) { + + "use strict"; + /* tslint:disable:max-classes-per-file */ + Object.defineProperty(exports, "__esModule", { value: true }); + var ErrorHandler = (function () { + function ErrorHandler() { + this.errors = []; + this.tolerant = false; + } + ErrorHandler.prototype.recordError = function (error) { + this.errors.push(error); + }; + ErrorHandler.prototype.tolerate = function (error) { + if (this.tolerant) { + this.recordError(error); + } + else { + throw error; + } + }; + ErrorHandler.prototype.constructError = function (msg, column) { + var error = new Error(msg); + try { + throw error; + } + catch (base) { + /* istanbul ignore else */ + if (Object.create && Object.defineProperty) { + error = Object.create(base); + Object.defineProperty(error, 'column', { value: column }); + } + } + /* istanbul ignore next */ + return error; + }; + ErrorHandler.prototype.createError = function (index, line, col, description) { + var msg = 'Line ' + line + ': ' + description; + var error = this.constructError(msg, col); + error.index = index; + error.lineNumber = line; + error.description = description; + return error; + }; + ErrorHandler.prototype.throwError = function (index, line, col, description) { + throw this.createError(index, line, col, description); + }; + ErrorHandler.prototype.tolerateError = function (index, line, col, description) { + var error = this.createError(index, line, col, description); + if (this.tolerant) { + this.recordError(error); + } + else { + throw error; + } + }; + return ErrorHandler; + }()); + exports.ErrorHandler = ErrorHandler; + + +/***/ }, +/* 11 */ +/***/ function(module, exports) { + + "use strict"; + Object.defineProperty(exports, "__esModule", { value: true }); + // Error messages should be identical to V8. 
+ exports.Messages = { + BadGetterArity: 'Getter must not have any formal parameters', + BadSetterArity: 'Setter must have exactly one formal parameter', + BadSetterRestParameter: 'Setter function argument must not be a rest parameter', + ConstructorIsAsync: 'Class constructor may not be an async method', + ConstructorSpecialMethod: 'Class constructor may not be an accessor', + DeclarationMissingInitializer: 'Missing initializer in %0 declaration', + DefaultRestParameter: 'Unexpected token =', + DuplicateBinding: 'Duplicate binding %0', + DuplicateConstructor: 'A class may only have one constructor', + DuplicateProtoProperty: 'Duplicate __proto__ fields are not allowed in object literals', + ForInOfLoopInitializer: '%0 loop variable declaration may not have an initializer', + GeneratorInLegacyContext: 'Generator declarations are not allowed in legacy contexts', + IllegalBreak: 'Illegal break statement', + IllegalContinue: 'Illegal continue statement', + IllegalExportDeclaration: 'Unexpected token', + IllegalImportDeclaration: 'Unexpected token', + IllegalLanguageModeDirective: 'Illegal \'use strict\' directive in function with non-simple parameter list', + IllegalReturn: 'Illegal return statement', + InvalidEscapedReservedWord: 'Keyword must not contain escaped characters', + InvalidHexEscapeSequence: 'Invalid hexadecimal escape sequence', + InvalidLHSInAssignment: 'Invalid left-hand side in assignment', + InvalidLHSInForIn: 'Invalid left-hand side in for-in', + InvalidLHSInForLoop: 'Invalid left-hand side in for-loop', + InvalidModuleSpecifier: 'Unexpected token', + InvalidRegExp: 'Invalid regular expression', + LetInLexicalBinding: 'let is disallowed as a lexically bound name', + MissingFromClause: 'Unexpected token', + MultipleDefaultsInSwitch: 'More than one default clause in switch statement', + NewlineAfterThrow: 'Illegal newline after throw', + NoAsAfterImportNamespace: 'Unexpected token', + NoCatchOrFinally: 'Missing catch or finally after try', + ParameterAfterRestParameter: 'Rest parameter must be last formal parameter', + Redeclaration: '%0 \'%1\' has already been declared', + StaticPrototype: 'Classes may not have static property named prototype', + StrictCatchVariable: 'Catch variable may not be eval or arguments in strict mode', + StrictDelete: 'Delete of an unqualified identifier in strict mode.', + StrictFunction: 'In strict mode code, functions can only be declared at top level or inside a block', + StrictFunctionName: 'Function name may not be eval or arguments in strict mode', + StrictLHSAssignment: 'Assignment to eval or arguments is not allowed in strict mode', + StrictLHSPostfix: 'Postfix increment/decrement may not have eval or arguments operand in strict mode', + StrictLHSPrefix: 'Prefix increment/decrement may not have eval or arguments operand in strict mode', + StrictModeWith: 'Strict mode code may not include a with statement', + StrictOctalLiteral: 'Octal literals are not allowed in strict mode.', + StrictParamDupe: 'Strict mode function may not have duplicate parameter names', + StrictParamName: 'Parameter name eval or arguments is not allowed in strict mode', + StrictReservedWord: 'Use of future reserved word in strict mode', + StrictVarName: 'Variable name may not be eval or arguments in strict mode', + TemplateOctalLiteral: 'Octal literals are not allowed in template strings.', + UnexpectedEOS: 'Unexpected end of input', + UnexpectedIdentifier: 'Unexpected identifier', + UnexpectedNumber: 'Unexpected number', + UnexpectedReserved: 'Unexpected reserved 
word', + UnexpectedString: 'Unexpected string', + UnexpectedTemplate: 'Unexpected quasi %0', + UnexpectedToken: 'Unexpected token %0', + UnexpectedTokenIllegal: 'Unexpected token ILLEGAL', + UnknownLabel: 'Undefined label \'%0\'', + UnterminatedRegExp: 'Invalid regular expression: missing /' + }; + + +/***/ }, +/* 12 */ +/***/ function(module, exports, __nested_webpack_require_226595__) { + + "use strict"; + Object.defineProperty(exports, "__esModule", { value: true }); + var assert_1 = __nested_webpack_require_226595__(9); + var character_1 = __nested_webpack_require_226595__(4); + var messages_1 = __nested_webpack_require_226595__(11); + function hexValue(ch) { + return '0123456789abcdef'.indexOf(ch.toLowerCase()); + } + function octalValue(ch) { + return '01234567'.indexOf(ch); + } + var Scanner = (function () { + function Scanner(code, handler) { + this.source = code; + this.errorHandler = handler; + this.trackComment = false; + this.isModule = false; + this.length = code.length; + this.index = 0; + this.lineNumber = (code.length > 0) ? 1 : 0; + this.lineStart = 0; + this.curlyStack = []; + } + Scanner.prototype.saveState = function () { + return { + index: this.index, + lineNumber: this.lineNumber, + lineStart: this.lineStart + }; + }; + Scanner.prototype.restoreState = function (state) { + this.index = state.index; + this.lineNumber = state.lineNumber; + this.lineStart = state.lineStart; + }; + Scanner.prototype.eof = function () { + return this.index >= this.length; + }; + Scanner.prototype.throwUnexpectedToken = function (message) { + if (message === void 0) { message = messages_1.Messages.UnexpectedTokenIllegal; } + return this.errorHandler.throwError(this.index, this.lineNumber, this.index - this.lineStart + 1, message); + }; + Scanner.prototype.tolerateUnexpectedToken = function (message) { + if (message === void 0) { message = messages_1.Messages.UnexpectedTokenIllegal; } + this.errorHandler.tolerateError(this.index, this.lineNumber, this.index - this.lineStart + 1, message); + }; + // https://tc39.github.io/ecma262/#sec-comments + Scanner.prototype.skipSingleLineComment = function (offset) { + var comments = []; + var start, loc; + if (this.trackComment) { + comments = []; + start = this.index - offset; + loc = { + start: { + line: this.lineNumber, + column: this.index - this.lineStart - offset + }, + end: {} + }; + } + while (!this.eof()) { + var ch = this.source.charCodeAt(this.index); + ++this.index; + if (character_1.Character.isLineTerminator(ch)) { + if (this.trackComment) { + loc.end = { + line: this.lineNumber, + column: this.index - this.lineStart - 1 + }; + var entry = { + multiLine: false, + slice: [start + offset, this.index - 1], + range: [start, this.index - 1], + loc: loc + }; + comments.push(entry); + } + if (ch === 13 && this.source.charCodeAt(this.index) === 10) { + ++this.index; + } + ++this.lineNumber; + this.lineStart = this.index; + return comments; + } + } + if (this.trackComment) { + loc.end = { + line: this.lineNumber, + column: this.index - this.lineStart + }; + var entry = { + multiLine: false, + slice: [start + offset, this.index], + range: [start, this.index], + loc: loc + }; + comments.push(entry); + } + return comments; + }; + Scanner.prototype.skipMultiLineComment = function () { + var comments = []; + var start, loc; + if (this.trackComment) { + comments = []; + start = this.index - 2; + loc = { + start: { + line: this.lineNumber, + column: this.index - this.lineStart - 2 + }, + end: {} + }; + } + while (!this.eof()) { + var ch = 
this.source.charCodeAt(this.index); + if (character_1.Character.isLineTerminator(ch)) { + if (ch === 0x0D && this.source.charCodeAt(this.index + 1) === 0x0A) { + ++this.index; + } + ++this.lineNumber; + ++this.index; + this.lineStart = this.index; + } + else if (ch === 0x2A) { + // Block comment ends with '*/'. + if (this.source.charCodeAt(this.index + 1) === 0x2F) { + this.index += 2; + if (this.trackComment) { + loc.end = { + line: this.lineNumber, + column: this.index - this.lineStart + }; + var entry = { + multiLine: true, + slice: [start + 2, this.index - 2], + range: [start, this.index], + loc: loc + }; + comments.push(entry); + } + return comments; + } + ++this.index; + } + else { + ++this.index; + } + } + // Ran off the end of the file - the whole thing is a comment + if (this.trackComment) { + loc.end = { + line: this.lineNumber, + column: this.index - this.lineStart + }; + var entry = { + multiLine: true, + slice: [start + 2, this.index], + range: [start, this.index], + loc: loc + }; + comments.push(entry); + } + this.tolerateUnexpectedToken(); + return comments; + }; + Scanner.prototype.scanComments = function () { + var comments; + if (this.trackComment) { + comments = []; + } + var start = (this.index === 0); + while (!this.eof()) { + var ch = this.source.charCodeAt(this.index); + if (character_1.Character.isWhiteSpace(ch)) { + ++this.index; + } + else if (character_1.Character.isLineTerminator(ch)) { + ++this.index; + if (ch === 0x0D && this.source.charCodeAt(this.index) === 0x0A) { + ++this.index; + } + ++this.lineNumber; + this.lineStart = this.index; + start = true; + } + else if (ch === 0x2F) { + ch = this.source.charCodeAt(this.index + 1); + if (ch === 0x2F) { + this.index += 2; + var comment = this.skipSingleLineComment(2); + if (this.trackComment) { + comments = comments.concat(comment); + } + start = true; + } + else if (ch === 0x2A) { + this.index += 2; + var comment = this.skipMultiLineComment(); + if (this.trackComment) { + comments = comments.concat(comment); + } + } + else { + break; + } + } + else if (start && ch === 0x2D) { + // U+003E is '>' + if ((this.source.charCodeAt(this.index + 1) === 0x2D) && (this.source.charCodeAt(this.index + 2) === 0x3E)) { + // '-->' is a single-line comment + this.index += 3; + var comment = this.skipSingleLineComment(3); + if (this.trackComment) { + comments = comments.concat(comment); + } + } + else { + break; + } + } + else if (ch === 0x3C && !this.isModule) { + if (this.source.slice(this.index + 1, this.index + 4) === '!--') { + this.index += 4; // ` regexps + set = set.map((s, si, set) => s.map(this.parse, this)) + + this.debug(this.pattern, set) + + // filter out everything that didn't compile properly. + set = set.filter(s => s.indexOf(false) === -1) + + this.debug(this.pattern, set) + + this.set = set + } + + parseNegate () { + if (this.options.nonegate) return + + const pattern = this.pattern + let negate = false + let negateOffset = 0 + + for (let i = 0; i < pattern.length && pattern.charAt(i) === '!'; i++) { + negate = !negate + negateOffset++ + } + + if (negateOffset) this.pattern = pattern.slice(negateOffset) + this.negate = negate + } + + // set partial to true to test if, for example, + // "/a/b" matches the start of "/*/b/*/d" + // Partial means, if you run out of file before you run + // out of pattern, then that's fine, as long as all + // the parts match. 
+ matchOne (file, pattern, partial) { + var options = this.options + + this.debug('matchOne', + { 'this': this, file: file, pattern: pattern }) + + this.debug('matchOne', file.length, pattern.length) + + for (var fi = 0, + pi = 0, + fl = file.length, + pl = pattern.length + ; (fi < fl) && (pi < pl) + ; fi++, pi++) { + this.debug('matchOne loop') + var p = pattern[pi] + var f = file[fi] + + this.debug(pattern, p, f) + + // should be impossible. + // some invalid regexp stuff in the set. + /* istanbul ignore if */ + if (p === false) return false + + if (p === GLOBSTAR) { + this.debug('GLOBSTAR', [pattern, p, f]) + + // "**" + // a/**/b/**/c would match the following: + // a/b/x/y/z/c + // a/x/y/z/b/c + // a/b/x/b/x/c + // a/b/c + // To do this, take the rest of the pattern after + // the **, and see if it would match the file remainder. + // If so, return success. + // If not, the ** "swallows" a segment, and try again. + // This is recursively awful. + // + // a/**/b/**/c matching a/b/x/y/z/c + // - a matches a + // - doublestar + // - matchOne(b/x/y/z/c, b/**/c) + // - b matches b + // - doublestar + // - matchOne(x/y/z/c, c) -> no + // - matchOne(y/z/c, c) -> no + // - matchOne(z/c, c) -> no + // - matchOne(c, c) yes, hit + var fr = fi + var pr = pi + 1 + if (pr === pl) { + this.debug('** at the end') + // a ** at the end will just swallow the rest. + // We have found a match. + // however, it will not swallow /.x, unless + // options.dot is set. + // . and .. are *never* matched by **, for explosively + // exponential reasons. + for (; fi < fl; fi++) { + if (file[fi] === '.' || file[fi] === '..' || + (!options.dot && file[fi].charAt(0) === '.')) return false + } + return true + } + + // ok, let's see if we can swallow whatever we can. + while (fr < fl) { + var swallowee = file[fr] + + this.debug('\nglobstar while', file, fr, pattern, pr, swallowee) + + // XXX remove this slice. Just pass the start index. + if (this.matchOne(file.slice(fr), pattern.slice(pr), partial)) { + this.debug('globstar found match!', fr, fl, swallowee) + // found a match. + return true + } else { + // can't swallow "." or ".." ever. + // can only swallow ".foo" when explicitly asked. + if (swallowee === '.' || swallowee === '..' || + (!options.dot && swallowee.charAt(0) === '.')) { + this.debug('dot detected!', file, fr, pattern, pr) + break + } + + // ** swallows a segment, and continue. + this.debug('globstar swallow a segment, and continue') + fr++ + } + } + + // no match was found. + // However, in partial mode, we can't say this is necessarily over. + // If there's more *pattern* left, then + /* istanbul ignore if */ + if (partial) { + // ran out of file + this.debug('\n>>> no match, partial?', file, fr, pattern, pr) + if (fr === fl) return true + } + return false + } + + // something other than ** + // non-magic patterns just have to match exactly + // patterns with magic have been turned into regexps. + var hit + if (typeof p === 'string') { + hit = f === p + this.debug('string match', p, f, hit) + } else { + hit = f.match(p) + this.debug('pattern match', p, f, hit) + } + + if (!hit) return false + } + + // Note: ending in / means that we'll get a final "" + // at the end of the pattern. This can only match a + // corresponding "" at the end of the file. + // If the file ends in /, then it can only match a + // a pattern that ends in /, unless the pattern just + // doesn't have any more for it. But, a/b/ should *not* + // match "a/b/*", even though "" matches against the + // [^/]*? 
pattern, except in partial mode, where it might + // simply not be reached yet. + // However, a/b/ should still satisfy a/* + + // now either we fell off the end of the pattern, or we're done. + if (fi === fl && pi === pl) { + // ran out of pattern and filename at the same time. + // an exact hit! + return true + } else if (fi === fl) { + // ran out of file, but still had pattern left. + // this is ok if we're doing the match as part of + // a glob fs traversal. + return partial + } else /* istanbul ignore else */ if (pi === pl) { + // ran out of pattern, still have file left. + // this is only acceptable if we're on the very last + // empty segment of a file with a trailing slash. + // a/* should match a/b/ + return (fi === fl - 1) && (file[fi] === '') + } + + // should be unreachable. + /* istanbul ignore next */ + throw new Error('wtf?') + } + + braceExpand () { + return braceExpand(this.pattern, this.options) + } + + parse (pattern, isSub) { + assertValidPattern(pattern) + + const options = this.options + + // shortcuts + if (pattern === '**') { + if (!options.noglobstar) + return GLOBSTAR + else + pattern = '*' + } + if (pattern === '') return '' + + let re = '' + let hasMagic = !!options.nocase + let escaping = false + // ? => one single character + const patternListStack = [] + const negativeLists = [] + let stateChar + let inClass = false + let reClassStart = -1 + let classStart = -1 + let cs + let pl + let sp + // . and .. never match anything that doesn't start with ., + // even when options.dot is set. + const patternStart = pattern.charAt(0) === '.' ? '' // anything + // not (start or / followed by . or .. followed by / or end) + : options.dot ? '(?!(?:^|\\\/)\\.{1,2}(?:$|\\\/))' + : '(?!\\.)' + + const clearStateChar = () => { + if (stateChar) { + // we had some state-tracking character + // that wasn't consumed by this pass. + switch (stateChar) { + case '*': + re += star + hasMagic = true + break + case '?': + re += qmark + hasMagic = true + break + default: + re += '\\' + stateChar + break + } + this.debug('clearStateChar %j %j', stateChar, re) + stateChar = false + } + } + + for (let i = 0, c; (i < pattern.length) && (c = pattern.charAt(i)); i++) { + this.debug('%s\t%s %s %j', pattern, i, re, c) + + // skip over any that are escaped. + if (escaping) { + /* istanbul ignore next - completely not allowed, even escaped. */ + if (c === '/') { + return false + } + + if (reSpecials[c]) { + re += '\\' + } + re += c + escaping = false + continue + } + + switch (c) { + /* istanbul ignore next */ + case '/': { + // Should already be path-split by now. + return false + } + + case '\\': + clearStateChar() + escaping = true + continue + + // the various stateChar values + // for the "extglob" stuff. + case '?': + case '*': + case '+': + case '@': + case '!': + this.debug('%s\t%s %s %j <-- stateChar', pattern, i, re, c) + + // all of those are literals inside a class, except that + // the glob [!a] means [^a] in regexp + if (inClass) { + this.debug(' in class') + if (c === '!' && i === classStart + 1) c = '^' + re += c + continue + } + + // if we already have a stateChar, then it means + // that there was something like ** or +? in there. + // Handle the stateChar, then proceed with this one. + this.debug('call clearStateChar %j', stateChar) + clearStateChar() + stateChar = c + // if extglob is disabled, then +(asdf|foo) isn't a thing. + // just clear the statechar *now*, rather than even diving into + // the patternList stuff. 
+ if (options.noext) clearStateChar() + continue + + case '(': + if (inClass) { + re += '(' + continue + } + + if (!stateChar) { + re += '\\(' + continue + } + + patternListStack.push({ + type: stateChar, + start: i - 1, + reStart: re.length, + open: plTypes[stateChar].open, + close: plTypes[stateChar].close + }) + // negation is (?:(?!js)[^/]*) + re += stateChar === '!' ? '(?:(?!(?:' : '(?:' + this.debug('plType %j %j', stateChar, re) + stateChar = false + continue + + case ')': + if (inClass || !patternListStack.length) { + re += '\\)' + continue + } + + clearStateChar() + hasMagic = true + pl = patternListStack.pop() + // negation is (?:(?!js)[^/]*) + // The others are (?:) + re += pl.close + if (pl.type === '!') { + negativeLists.push(pl) + } + pl.reEnd = re.length + continue + + case '|': + if (inClass || !patternListStack.length) { + re += '\\|' + continue + } + + clearStateChar() + re += '|' + continue + + // these are mostly the same in regexp and glob + case '[': + // swallow any state-tracking char before the [ + clearStateChar() + + if (inClass) { + re += '\\' + c + continue + } + + inClass = true + classStart = i + reClassStart = re.length + re += c + continue + + case ']': + // a right bracket shall lose its special + // meaning and represent itself in + // a bracket expression if it occurs + // first in the list. -- POSIX.2 2.8.3.2 + if (i === classStart + 1 || !inClass) { + re += '\\' + c + continue + } + + // handle the case where we left a class open. + // "[z-a]" is valid, equivalent to "\[z-a\]" + // split where the last [ was, make sure we don't have + // an invalid re. if so, re-walk the contents of the + // would-be class to re-translate any characters that + // were passed through as-is + // TODO: It would probably be faster to determine this + // without a try/catch and a new RegExp, but it's tricky + // to do safely. For now, this is safe and works. + cs = pattern.substring(classStart + 1, i) + try { + RegExp('[' + cs + ']') + } catch (er) { + // not a valid class! + sp = this.parse(cs, SUBPARSE) + re = re.substring(0, reClassStart) + '\\[' + sp[0] + '\\]' + hasMagic = hasMagic || sp[1] + inClass = false + continue + } + + // finish up the class. + hasMagic = true + inClass = false + re += c + continue + + default: + // swallow any state char that wasn't consumed + clearStateChar() + + if (reSpecials[c] && !(c === '^' && inClass)) { + re += '\\' + } + + re += c + break + + } // switch + } // for + + // handle the case where we left a class open. + // "[abc" is valid, equivalent to "\[abc" + if (inClass) { + // split where the last [ was, and escape it + // this is a huge pita. We now have to re-walk + // the contents of the would-be class to re-translate + // any characters that were passed through as-is + cs = pattern.slice(classStart + 1) + sp = this.parse(cs, SUBPARSE) + re = re.substring(0, reClassStart) + '\\[' + sp[0] + hasMagic = hasMagic || sp[1] + } + + // handle the case where we had a +( thing at the *end* + // of the pattern. + // each pattern list stack adds 3 chars, and we need to go through + // and escape any | chars that were passed through as-is for the regexp. + // Go through and escape them, taking care not to double-escape any + // | chars that were already escaped. 
+ for (pl = patternListStack.pop(); pl; pl = patternListStack.pop()) { + let tail + tail = re.slice(pl.reStart + pl.open.length) + this.debug('setting tail', re, pl) + // maybe some even number of \, then maybe 1 \, followed by a | + tail = tail.replace(/((?:\\{2}){0,64})(\\?)\|/g, (_, $1, $2) => { + /* istanbul ignore else - should already be done */ + if (!$2) { + // the | isn't already escaped, so escape it. + $2 = '\\' + } + + // need to escape all those slashes *again*, without escaping the + // one that we need for escaping the | character. As it works out, + // escaping an even number of slashes can be done by simply repeating + // it exactly after itself. That's why this trick works. + // + // I am sorry that you have to see this. + return $1 + $1 + $2 + '|' + }) + + this.debug('tail=%j\n %s', tail, tail, pl, re) + const t = pl.type === '*' ? star + : pl.type === '?' ? qmark + : '\\' + pl.type + + hasMagic = true + re = re.slice(0, pl.reStart) + t + '\\(' + tail + } + + // handle trailing things that only matter at the very end. + clearStateChar() + if (escaping) { + // trailing \\ + re += '\\\\' + } + + // only need to apply the nodot start if the re starts with + // something that could conceivably capture a dot + const addPatternStart = addPatternStartSet[re.charAt(0)] + + // Hack to work around lack of negative lookbehind in JS + // A pattern like: *.!(x).!(y|z) needs to ensure that a name + // like 'a.xyz.yz' doesn't match. So, the first negative + // lookahead, has to look ALL the way ahead, to the end of + // the pattern. + for (let n = negativeLists.length - 1; n > -1; n--) { + const nl = negativeLists[n] + + const nlBefore = re.slice(0, nl.reStart) + const nlFirst = re.slice(nl.reStart, nl.reEnd - 8) + let nlAfter = re.slice(nl.reEnd) + const nlLast = re.slice(nl.reEnd - 8, nl.reEnd) + nlAfter + + // Handle nested stuff like *(*.js|!(*.json)), where open parens + // mean that we should *not* include the ) in the bit that is considered + // "after" the negated section. + const openParensBefore = nlBefore.split('(').length - 1 + let cleanAfter = nlAfter + for (let i = 0; i < openParensBefore; i++) { + cleanAfter = cleanAfter.replace(/\)[+*?]?/, '') + } + nlAfter = cleanAfter + + const dollar = nlAfter === '' && isSub !== SUBPARSE ? '$' : '' + re = nlBefore + nlFirst + nlAfter + dollar + nlLast + } + + // if the re is not "" at this point, then we need to make sure + // it doesn't match against an empty path part. + // Otherwise a/* will match a/, which it should not. + if (re !== '' && hasMagic) { + re = '(?=.)' + re + } + + if (addPatternStart) { + re = patternStart + re + } + + // parsing just a piece of a larger pattern. + if (isSub === SUBPARSE) { + return [re, hasMagic] + } + + // skip the regexp for non-magical patterns + // unescape anything in it, though, so that it'll be + // an exact match against a file etc. + if (!hasMagic) { + return globUnescape(pattern) + } + + const flags = options.nocase ? 'i' : '' + try { + return Object.assign(new RegExp('^' + re + '$', flags), { + _glob: pattern, + _src: re, + }) + } catch (er) /* istanbul ignore next - should be impossible */ { + // If it was an invalid regular expression, then it can't match + // anything. This trick looks for a character after the end of + // the string, which is of course impossible, except in multi-line + // mode, but it's not a /m regex. 
+ return new RegExp('$.') + } + } + + makeRe () { + if (this.regexp || this.regexp === false) return this.regexp + + // at this point, this.set is a 2d array of partial + // pattern strings, or "**". + // + // It's better to use .match(). This function shouldn't + // be used, really, but it's pretty convenient sometimes, + // when you just want to work with a regex. + const set = this.set + + if (!set.length) { + this.regexp = false + return this.regexp + } + const options = this.options + + const twoStar = options.noglobstar ? star + : options.dot ? twoStarDot + : twoStarNoDot + const flags = options.nocase ? 'i' : '' + + // coalesce globstars and regexpify non-globstar patterns + // if it's the only item, then we just do one twoStar + // if it's the first, and there are more, prepend (\/|twoStar\/)? to next + // if it's the last, append (\/twoStar|) to previous + // if it's in the middle, append (\/|\/twoStar\/) to previous + // then filter out GLOBSTAR symbols + let re = set.map(pattern => { + pattern = pattern.map(p => + typeof p === 'string' ? regExpEscape(p) + : p === GLOBSTAR ? GLOBSTAR + : p._src + ).reduce((set, p) => { + if (!(set[set.length - 1] === GLOBSTAR && p === GLOBSTAR)) { + set.push(p) + } + return set + }, []) + pattern.forEach((p, i) => { + if (p !== GLOBSTAR || pattern[i-1] === GLOBSTAR) { + return + } + if (i === 0) { + if (pattern.length > 1) { + pattern[i+1] = '(?:\\\/|' + twoStar + '\\\/)?' + pattern[i+1] + } else { + pattern[i] = twoStar + } + } else if (i === pattern.length - 1) { + pattern[i-1] += '(?:\\\/|' + twoStar + ')?' + } else { + pattern[i-1] += '(?:\\\/|\\\/' + twoStar + '\\\/)' + pattern[i+1] + pattern[i+1] = GLOBSTAR + } + }) + return pattern.filter(p => p !== GLOBSTAR).join('/') + }).join('|') + + // must match entire pattern + // ending in a * or ** will make it less strict. + re = '^(?:' + re + ')$' + + // can match anything, as long as it's not this. + if (this.negate) re = '^(?!' + re + ').*$' + + try { + this.regexp = new RegExp(re, flags) + } catch (ex) /* istanbul ignore next - should be impossible */ { + this.regexp = false + } + return this.regexp + } + + match (f, partial = this.partial) { + this.debug('match', f, this.pattern) + // short-circuit in the case of busted things. + // comments, etc. + if (this.comment) return false + if (this.empty) return f === '' + + if (f === '/' && partial) return true + + const options = this.options + + // windows: need to use /, not \ + if (path.sep !== '/') { + f = f.split(path.sep).join('/') + } + + // treat the test path as a set of pathparts. + f = f.split(slashSplit) + this.debug(this.pattern, 'split', f) + + // just ONE of the pattern sets in this.set needs to match + // in order for it to be valid. If negating, then just one + // match means that we have failed. + // Either way, return on the first hit. + + const set = this.set + this.debug(this.pattern, 'set', set) + + // Find the basename of the path by looking for the last non-empty segment + let filename + for (let i = f.length - 1; i >= 0; i--) { + filename = f[i] + if (filename) break + } + + for (let i = 0; i < set.length; i++) { + const pattern = set[i] + let file = f + if (options.matchBase && pattern.length === 1) { + file = [filename] + } + const hit = this.matchOne(file, pattern, partial) + if (hit) { + if (options.flipNegate) return true + return !this.negate + } + } + + // didn't get any hits. this is success if it's a negative + // pattern, failure otherwise. 
+ if (options.flipNegate) return false + return this.negate + } + + static defaults (def) { + return minimatch.defaults(def).Minimatch + } +} + +minimatch.Minimatch = Minimatch diff --git a/deps/npm/node_modules/mute-stream/mute.js b/deps/npm/node_modules/mute-stream/mute.js new file mode 100644 index 00000000000000..a24fc09975bb32 --- /dev/null +++ b/deps/npm/node_modules/mute-stream/mute.js @@ -0,0 +1,145 @@ +var Stream = require('stream') + +module.exports = MuteStream + +// var out = new MuteStream(process.stdout) +// argument auto-pipes +function MuteStream (opts) { + Stream.apply(this) + opts = opts || {} + this.writable = this.readable = true + this.muted = false + this.on('pipe', this._onpipe) + this.replace = opts.replace + + // For readline-type situations + // This much at the start of a line being redrawn after a ctrl char + // is seen (such as backspace) won't be redrawn as the replacement + this._prompt = opts.prompt || null + this._hadControl = false +} + +MuteStream.prototype = Object.create(Stream.prototype) + +Object.defineProperty(MuteStream.prototype, 'constructor', { + value: MuteStream, + enumerable: false +}) + +MuteStream.prototype.mute = function () { + this.muted = true +} + +MuteStream.prototype.unmute = function () { + this.muted = false +} + +Object.defineProperty(MuteStream.prototype, '_onpipe', { + value: onPipe, + enumerable: false, + writable: true, + configurable: true +}) + +function onPipe (src) { + this._src = src +} + +Object.defineProperty(MuteStream.prototype, 'isTTY', { + get: getIsTTY, + set: setIsTTY, + enumerable: true, + configurable: true +}) + +function getIsTTY () { + return( (this._dest) ? this._dest.isTTY + : (this._src) ? this._src.isTTY + : false + ) +} + +// basically just get replace the getter/setter with a regular value +function setIsTTY (isTTY) { + Object.defineProperty(this, 'isTTY', { + value: isTTY, + enumerable: true, + writable: true, + configurable: true + }) +} + +Object.defineProperty(MuteStream.prototype, 'rows', { + get: function () { + return( this._dest ? this._dest.rows + : this._src ? this._src.rows + : undefined ) + }, enumerable: true, configurable: true }) + +Object.defineProperty(MuteStream.prototype, 'columns', { + get: function () { + return( this._dest ? this._dest.columns + : this._src ? 
this._src.columns + : undefined ) + }, enumerable: true, configurable: true }) + + +MuteStream.prototype.pipe = function (dest, options) { + this._dest = dest + return Stream.prototype.pipe.call(this, dest, options) +} + +MuteStream.prototype.pause = function () { + if (this._src) return this._src.pause() +} + +MuteStream.prototype.resume = function () { + if (this._src) return this._src.resume() +} + +MuteStream.prototype.write = function (c) { + if (this.muted) { + if (!this.replace) return true + if (c.match(/^\u001b/)) { + if(c.indexOf(this._prompt) === 0) { + c = c.substr(this._prompt.length); + c = c.replace(/./g, this.replace); + c = this._prompt + c; + } + this._hadControl = true + return this.emit('data', c) + } else { + if (this._prompt && this._hadControl && + c.indexOf(this._prompt) === 0) { + this._hadControl = false + this.emit('data', this._prompt) + c = c.substr(this._prompt.length) + } + c = c.toString().replace(/./g, this.replace) + } + } + this.emit('data', c) +} + +MuteStream.prototype.end = function (c) { + if (this.muted) { + if (c && this.replace) { + c = c.toString().replace(/./g, this.replace) + } else { + c = null + } + } + if (c) this.emit('data', c) + this.emit('end') +} + +function proxy (fn) { return function () { + var d = this._dest + var s = this._src + if (d && d[fn]) d[fn].apply(d, arguments) + if (s && s[fn]) s[fn].apply(s, arguments) +}} + +MuteStream.prototype.destroy = proxy('destroy') +MuteStream.prototype.destroySoon = proxy('destroySoon') +MuteStream.prototype.close = proxy('close') diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/LICENSE.md b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/LICENSE.md new file mode 100644 index 00000000000000..072bf20840acd6 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/LICENSE.md @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) Sindre Sorhus (https://sindresorhus.com) +Copyright (c) npm, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/lib/index.js new file mode 100644 index 00000000000000..5789bb127e0966 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/lib/index.js @@ -0,0 +1,185 @@ +const { dirname, join, resolve, relative, isAbsolute } = require('path') +const rimraf_ = require('rimraf') +const { promisify } = require('util') +const { + access: access_, + accessSync, + copyFile: copyFile_, + copyFileSync, + readdir: readdir_, + readdirSync, + rename: rename_, + renameSync, + stat: stat_, + statSync, + lstat: lstat_, + lstatSync, + symlink: symlink_, + symlinkSync, + readlink: readlink_, + readlinkSync, +} = require('fs') + +const access = promisify(access_) +const copyFile = promisify(copyFile_) +const readdir = promisify(readdir_) +const rename = promisify(rename_) +const stat = promisify(stat_) +const lstat = promisify(lstat_) +const symlink = promisify(symlink_) +const readlink = promisify(readlink_) +const rimraf = promisify(rimraf_) +const rimrafSync = rimraf_.sync + +const mkdirp = require('mkdirp') + +const pathExists = async path => { + try { + await access(path) + return true + } catch (er) { + return er.code !== 'ENOENT' + } +} + +const pathExistsSync = path => { + try { + accessSync(path) + return true + } catch (er) { + return er.code !== 'ENOENT' + } +} + +const moveFile = async (source, destination, options = {}, root = true, symlinks = []) => { + if (!source || !destination) { + throw new TypeError('`source` and `destination` file required') + } + + options = { + overwrite: true, + ...options, + } + + if (!options.overwrite && await pathExists(destination)) { + throw new Error(`The destination file exists: ${destination}`) + } + + await mkdirp(dirname(destination)) + + try { + await rename(source, destination) + } catch (error) { + if (error.code === 'EXDEV' || error.code === 'EPERM') { + const sourceStat = await lstat(source) + if (sourceStat.isDirectory()) { + const files = await readdir(source) + await Promise.all(files.map((file) => + moveFile(join(source, file), join(destination, file), options, false, symlinks) + )) + } else if (sourceStat.isSymbolicLink()) { + symlinks.push({ source, destination }) + } else { + await copyFile(source, destination) + } + } else { + throw error + } + } + + if (root) { + await Promise.all(symlinks.map(async ({ source: symSource, destination: symDestination }) => { + let target = await readlink(symSource) + // junction symlinks in windows will be absolute paths, so we need to + // make sure they point to the symlink destination + if (isAbsolute(target)) { + target = resolve(symDestination, relative(symSource, target)) + } + // try to determine what the actual file is so we can create the correct + // type of symlink in windows + let targetStat = 'file' + try { + targetStat = await stat(resolve(dirname(symSource), target)) + if (targetStat.isDirectory()) { + targetStat = 'junction' + } + } catch { + // targetStat remains 'file' + } + await symlink( + target, + symDestination, + targetStat + ) + })) + await rimraf(source) + } +} + +const moveFileSync = (source, destination, options = {}, root = true, symlinks = []) => { + if (!source || !destination) { + throw new TypeError('`source` and `destination` file required') + } + + options = { + overwrite: true, + ...options, + } + + if (!options.overwrite && pathExistsSync(destination)) { + throw new Error(`The destination file exists: 
${destination}`) + } + + mkdirp.sync(dirname(destination)) + + try { + renameSync(source, destination) + } catch (error) { + if (error.code === 'EXDEV' || error.code === 'EPERM') { + const sourceStat = lstatSync(source) + if (sourceStat.isDirectory()) { + const files = readdirSync(source) + for (const file of files) { + moveFileSync(join(source, file), join(destination, file), options, false, symlinks) + } + } else if (sourceStat.isSymbolicLink()) { + symlinks.push({ source, destination }) + } else { + copyFileSync(source, destination) + } + } else { + throw error + } + } + + if (root) { + for (const { source: symSource, destination: symDestination } of symlinks) { + let target = readlinkSync(symSource) + // junction symlinks in windows will be absolute paths, so we need to + // make sure they point to the symlink destination + if (isAbsolute(target)) { + target = resolve(symDestination, relative(symSource, target)) + } + // try to determine what the actual file is so we can create the correct + // type of symlink in windows + let targetStat = 'file' + try { + targetStat = statSync(resolve(dirname(symSource), target)) + if (targetStat.isDirectory()) { + targetStat = 'junction' + } + } catch { + // targetStat remains 'file' + } + symlinkSync( + target, + symDestination, + targetStat + ) + } + rimrafSync(source) + } +} + +module.exports = moveFile +module.exports.sync = moveFileSync diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/package.json b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/package.json new file mode 100644 index 00000000000000..58793b93a9ca0f --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/move-file/package.json @@ -0,0 +1,47 @@ +{ + "name": "@npmcli/move-file", + "version": "2.0.1", + "files": [ + "bin/", + "lib/" + ], + "main": "lib/index.js", + "description": "move a file (fork of move-file)", + "dependencies": { + "mkdirp": "^1.0.4", + "rimraf": "^3.0.2" + }, + "devDependencies": { + "@npmcli/eslint-config": "^3.0.1", + "@npmcli/template-oss": "3.5.0", + "tap": "^16.0.1" + }, + "scripts": { + "test": "tap", + "snap": "tap", + "preversion": "npm test", + "postversion": "npm publish", + "prepublishOnly": "git push origin --follow-tags", + "lint": "eslint \"**/*.js\"", + "postlint": "template-oss-check", + "template-oss-apply": "template-oss-apply --force", + "lintfix": "npm run lint -- --fix", + "posttest": "npm run lint" + }, + "repository": { + "type": "git", + "url": "https://github.com/npm/move-file.git" + }, + "tap": { + "check-coverage": true + }, + "license": "MIT", + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + }, + "author": "GitHub Inc.", + "templateOSS": { + "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", + "version": "3.5.0" + } +} diff --git a/deps/npm/node_modules/npm-user-validate/npm-user-validate.js b/deps/npm/node_modules/npm-user-validate/npm-user-validate.js new file mode 100644 index 00000000000000..ffd8791c7eb95d --- /dev/null +++ b/deps/npm/node_modules/npm-user-validate/npm-user-validate.js @@ -0,0 +1,61 @@ +exports.email = email +exports.pw = pw +exports.username = username +var requirements = exports.requirements = { + username: { + length: 'Name length must be less than or equal to 214 characters long', + lowerCase: 'Name must be lowercase', + urlSafe: 'Name may not contain non-url-safe chars', + dot: 'Name may not start with "."', + illegal: 'Name may not contain illegal character' + }, + password: {}, + email: { + length: 'Email length must be less then or equal to 254 characters long', + valid: 'Email must be an email address' + } +} + +var illegalCharacterRe = new RegExp('([' + [ + "'" +].join() + '])') + +function username (un) { + if (un !== un.toLowerCase()) { + return new Error(requirements.username.lowerCase) + } + + if (un !== encodeURIComponent(un)) { + return new Error(requirements.username.urlSafe) + } + + if (un.charAt(0) === '.') { + return new Error(requirements.username.dot) + } + + if (un.length > 214) { + return new Error(requirements.username.length) + } + + var illegal = un.match(illegalCharacterRe) + if (illegal) { + return new Error(requirements.username.illegal + ' "' + illegal[0] + '"') + } + + return null +} + +function email (em) { + if (em.length > 254) { + return new Error(requirements.email.length) + } + if (!em.match(/^[^@]+@.+\..+$/)) { + return new Error(requirements.email.valid) + } + + return null +} + +function pw (pw) { + return null +} diff --git a/deps/npm/node_modules/promzard/example/buffer.js b/deps/npm/node_modules/promzard/example/buffer.js new file mode 100644 index 00000000000000..828f9d1df9da2f --- /dev/null +++ b/deps/npm/node_modules/promzard/example/buffer.js @@ -0,0 +1,12 @@ +var pz = require('../promzard') + +var path = require('path') +var file = path.resolve(__dirname, 'substack-input.js') +var buf = require('fs').readFileSync(file) +var ctx = { basename: path.basename(path.dirname(file)) } + +pz.fromBuffer(buf, ctx, function (er, res) { + if (er) + throw er + console.error(JSON.stringify(res, null, 2)) +}) diff --git a/deps/npm/node_modules/promzard/example/index.js b/deps/npm/node_modules/promzard/example/index.js new file mode 100644 index 00000000000000..435131f3a6d1e2 --- /dev/null +++ b/deps/npm/node_modules/promzard/example/index.js @@ -0,0 +1,11 @@ +var pz = require('../promzard') + +var path = require('path') +var file = path.resolve(__dirname, 'substack-input.js') +var ctx = { basename: path.basename(path.dirname(file)) } + +pz(file, ctx, function (er, res) { + if (er) + throw er + console.error(JSON.stringify(res, null, 2)) +}) diff --git a/deps/npm/node_modules/promzard/example/npm-init/init-input.js b/deps/npm/node_modules/promzard/example/npm-init/init-input.js new file mode 100644 index 00000000000000..ba7781b3539e4d --- /dev/null +++ b/deps/npm/node_modules/promzard/example/npm-init/init-input.js @@ -0,0 +1,191 @@ +var fs = require('fs') +var path = require('path'); + +module.exports = { + "name" : prompt('name', + typeof name === 'undefined' + ? basename.replace(/^node-|[.-]js$/g, ''): name), + "version" : prompt('version', typeof version !== "undefined" + ? 
version : '0.0.0'), + "description" : (function () { + if (typeof description !== 'undefined' && description) { + return description + } + var value; + try { + var src = fs.readFileSync('README.md', 'utf8'); + value = src.split('\n').filter(function (line) { + return /\s+/.test(line) + && line.trim() !== basename.replace(/^node-/, '') + && !line.trim().match(/^#/) + ; + })[0] + .trim() + .replace(/^./, function (c) { return c.toLowerCase() }) + .replace(/\.$/, '') + ; + } + catch (e) { + try { + // Wouldn't it be nice if that file mattered? + var d = fs.readFileSync('.git/description', 'utf8') + } catch (e) {} + if (d.trim() && !value) value = d + } + return prompt('description', value); + })(), + "main" : (function () { + var f + try { + f = fs.readdirSync(dirname).filter(function (f) { + return f.match(/\.js$/) + }) + if (f.indexOf('index.js') !== -1) + f = 'index.js' + else if (f.indexOf('main.js') !== -1) + f = 'main.js' + else if (f.indexOf(basename + '.js') !== -1) + f = basename + '.js' + else + f = f[0] + } catch (e) {} + + return prompt('entry point', f || 'index.js') + })(), + "bin" : function (cb) { + fs.readdir(dirname + '/bin', function (er, d) { + // no bins + if (er) return cb() + // just take the first js file we find there, or nada + return cb(null, d.filter(function (f) { + return f.match(/\.js$/) + })[0]) + }) + }, + "directories" : function (cb) { + fs.readdir('.', function (er, dirs) { + if (er) return cb(er) + var res = {} + dirs.forEach(function (d) { + switch (d) { + case 'example': case 'examples': return res.example = d + case 'test': case 'tests': return res.test = d + case 'doc': case 'docs': return res.doc = d + case 'man': return res.man = d + } + }) + if (Object.keys(res).length === 0) res = undefined + return cb(null, res) + }) + }, + "dependencies" : typeof dependencies !== 'undefined' ? dependencies + : function (cb) { + fs.readdir('node_modules', function (er, dir) { + if (er) return cb() + var deps = {} + var n = dir.length + dir.forEach(function (d) { + if (d.match(/^\./)) return next() + if (d.match(/^(expresso|mocha|tap|coffee-script|coco|streamline)$/)) + return next() + fs.readFile('node_modules/' + d + '/package.json', function (er, p) { + if (er) return next() + try { p = JSON.parse(p) } catch (e) { return next() } + if (!p.version) return next() + deps[d] = '~' + p.version + return next() + }) + }) + function next () { + if (--n === 0) return cb(null, deps) + } + }) + }, + "devDependencies" : typeof devDependencies !== 'undefined' ? devDependencies + : function (cb) { + // same as dependencies but for dev deps + fs.readdir('node_modules', function (er, dir) { + if (er) return cb() + var deps = {} + var n = dir.length + dir.forEach(function (d) { + if (d.match(/^\./)) return next() + if (!d.match(/^(expresso|mocha|tap|coffee-script|coco|streamline)$/)) + return next() + fs.readFile('node_modules/' + d + '/package.json', function (er, p) { + if (er) return next() + try { p = JSON.parse(p) } catch (e) { return next() } + if (!p.version) return next() + deps[d] = '~' + p.version + return next() + }) + }) + function next () { + if (--n === 0) return cb(null, deps) + } + }) + }, + "scripts" : (function () { + // check to see what framework is in use, if any + try { var d = fs.readdirSync('node_modules') } + catch (e) { d = [] } + var s = typeof scripts === 'undefined' ? 
{} : scripts + + if (d.indexOf('coffee-script') !== -1) + s.prepublish = prompt('build command', + s.prepublish || 'coffee src/*.coffee -o lib') + + var notest = 'echo "Error: no test specified" && exit 1' + function tx (test) { + return test || notest + } + + if (!s.test || s.test === notest) { + if (d.indexOf('tap') !== -1) + s.test = prompt('test command', 'tap test/*.js', tx) + else if (d.indexOf('expresso') !== -1) + s.test = prompt('test command', 'expresso test', tx) + else if (d.indexOf('mocha') !== -1) + s.test = prompt('test command', 'mocha', tx) + else + s.test = prompt('test command', tx) + } + + return s + + })(), + + "repository" : (function () { + try { var gconf = fs.readFileSync('.git/config') } + catch (e) { gconf = null } + if (gconf) { + gconf = gconf.split(/\r?\n/) + var i = gconf.indexOf('[remote "origin"]') + if (i !== -1) { + var u = gconf[i + 1] + if (!u.match(/^\s*url =/)) u = gconf[i + 2] + if (!u.match(/^\s*url =/)) u = null + else u = u.replace(/^\s*url = /, '') + } + if (u && u.match(/^git@github.com:/)) + u = u.replace(/^git@github.com:/, 'git://github.com/') + } + + return prompt('git repository', u) + })(), + + "keywords" : prompt(function (s) { + if (!s) return undefined + if (Array.isArray(s)) s = s.join(' ') + if (typeof s !== 'string') return s + return s.split(/[\s,]+/) + }), + "author" : config['init.author.name'] + ? { + "name" : config['init.author.name'], + "email" : config['init.author.email'], + "url" : config['init.author.url'] + } + : undefined, + "license" : prompt('license', 'BSD') +} diff --git a/deps/npm/node_modules/promzard/example/npm-init/init.js b/deps/npm/node_modules/promzard/example/npm-init/init.js new file mode 100644 index 00000000000000..09484cd1c19911 --- /dev/null +++ b/deps/npm/node_modules/promzard/example/npm-init/init.js @@ -0,0 +1,37 @@ +var PZ = require('../../promzard').PromZard +var path = require('path') +var input = path.resolve(__dirname, 'init-input.js') + +var fs = require('fs') +var package = path.resolve(__dirname, 'package.json') +var pkg + +fs.readFile(package, 'utf8', function (er, d) { + if (er) ctx = {} + try { ctx = JSON.parse(d); pkg = JSON.parse(d) } + catch (e) { ctx = {} } + + ctx.dirname = path.dirname(package) + ctx.basename = path.basename(ctx.dirname) + if (!ctx.version) ctx.version = undefined + + // this should be replaced with the npm conf object + ctx.config = {} + + console.error('ctx=', ctx) + + var pz = new PZ(input, ctx) + + pz.on('data', function (data) { + console.error('pz data', data) + if (!pkg) pkg = {} + Object.keys(data).forEach(function (k) { + if (data[k] !== undefined && data[k] !== null) pkg[k] = data[k] + }) + console.error('package data %s', JSON.stringify(data, null, 2)) + fs.writeFile(package, JSON.stringify(pkg, null, 2), function (er) { + if (er) throw er + console.log('ok') + }) + }) +}) diff --git a/deps/npm/node_modules/promzard/example/npm-init/package.json b/deps/npm/node_modules/promzard/example/npm-init/package.json new file mode 100644 index 00000000000000..89c6d1fb6e2acf --- /dev/null +++ b/deps/npm/node_modules/promzard/example/npm-init/package.json @@ -0,0 +1,10 @@ +{ + "name": "npm-init", + "version": "0.0.0", + "description": "an initter you init wit, innit?", + "main": "index.js", + "scripts": { + "test": "asdf" + }, + "license": "BSD" +} \ No newline at end of file diff --git a/deps/npm/node_modules/promzard/example/substack-input.js b/deps/npm/node_modules/promzard/example/substack-input.js new file mode 100644 index 00000000000000..bf7aedb82d41fd --- 
/dev/null +++ b/deps/npm/node_modules/promzard/example/substack-input.js @@ -0,0 +1,61 @@ +module.exports = { + "name" : basename.replace(/^node-/, ''), + "version" : "0.0.0", + "description" : (function (cb) { + var fs = require('fs'); + var value; + try { + var src = fs.readFileSync('README.markdown', 'utf8'); + value = src.split('\n').filter(function (line) { + return /\s+/.test(line) + && line.trim() !== basename.replace(/^node-/, '') + ; + })[0] + .trim() + .replace(/^./, function (c) { return c.toLowerCase() }) + .replace(/\.$/, '') + ; + } + catch (e) {} + + return prompt('description', value); + })(), + "main" : prompt('entry point', 'index.js'), + "bin" : function (cb) { + var path = require('path'); + var fs = require('fs'); + var exists = fs.exists || path.exists; + exists('bin/cmd.js', function (ex) { + var bin + if (ex) { + var bin = {} + bin[basename.replace(/^node-/, '')] = 'bin/cmd.js' + } + cb(null, bin); + }); + }, + "directories" : { + "example" : "example", + "test" : "test" + }, + "dependencies" : {}, + "devDependencies" : { + "tap" : "~0.2.5" + }, + "scripts" : { + "test" : "tap test/*.js" + }, + "repository" : { + "type" : "git", + "url" : "git://github.com/substack/" + basename + ".git" + }, + "homepage" : "https://github.com/substack/" + basename, + "keywords" : prompt(function (s) { return s.split(/\s+/) }), + "author" : { + "name" : "James Halliday", + "email" : "mail@substack.net", + "url" : "http://substack.net" + }, + "license" : "MIT", + "engine" : { "node" : ">=0.6" } +} diff --git a/deps/npm/node_modules/promzard/promzard.js b/deps/npm/node_modules/promzard/promzard.js new file mode 100644 index 00000000000000..da1abca9535e4f --- /dev/null +++ b/deps/npm/node_modules/promzard/promzard.js @@ -0,0 +1,238 @@ +module.exports = promzard +promzard.PromZard = PromZard + +var fs = require('fs') +var vm = require('vm') +var util = require('util') +var files = {} +var crypto = require('crypto') +var EventEmitter = require('events').EventEmitter +var read = require('read') + +var Module = require('module').Module +var path = require('path') + +function promzard (file, ctx, cb) { + if (typeof ctx === 'function') cb = ctx, ctx = null; + if (!ctx) ctx = {}; + var pz = new PromZard(file, ctx) + pz.on('error', cb) + pz.on('data', function (data) { + cb(null, data) + }) +} +promzard.fromBuffer = function (buf, ctx, cb) { + var filename = 0 + do { + filename = '\0' + Math.random(); + } while (files[filename]) + files[filename] = buf + var ret = promzard(filename, ctx, cb) + delete files[filename] + return ret +} + +function PromZard (file, ctx) { + if (!(this instanceof PromZard)) + return new PromZard(file, ctx) + EventEmitter.call(this) + this.file = file + this.ctx = ctx + this.unique = crypto.randomBytes(8).toString('hex') + this.load() +} + +PromZard.prototype = Object.create( + EventEmitter.prototype, + { constructor: { + value: PromZard, + readable: true, + configurable: true, + writable: true, + enumerable: false } } ) + +PromZard.prototype.load = function () { + if (files[this.file]) + return this.loaded() + + fs.readFile(this.file, 'utf8', function (er, d) { + if (er && this.backupFile) { + this.file = this.backupFile + delete this.backupFile + return this.load() + } + if (er) + return this.emit('error', this.error = er) + files[this.file] = d + this.loaded() + }.bind(this)) +} + +PromZard.prototype.loaded = function () { + this.ctx.prompt = this.makePrompt() + this.ctx.__filename = this.file + this.ctx.__dirname = path.dirname(this.file) + this.ctx.__basename = 
path.basename(this.file) + var mod = this.ctx.module = this.makeModule() + this.ctx.require = function (path) { + return mod.require(path) + } + this.ctx.require.resolve = function(path) { + return Module._resolveFilename(path, mod); + } + this.ctx.exports = mod.exports + + this.script = this.wrap(files[this.file]) + var fn = vm.runInThisContext(this.script, this.file) + var args = Object.keys(this.ctx).map(function (k) { + return this.ctx[k] + }.bind(this)) + try { var res = fn.apply(this.ctx, args) } + catch (er) { this.emit('error', er) } + if (res && + typeof res === 'object' && + exports === mod.exports && + Object.keys(exports).length === 1) { + this.result = res + } else { + this.result = mod.exports + } + this.walk() +} + +PromZard.prototype.makeModule = function () { + var mod = new Module(this.file, module) + mod.loaded = true + mod.filename = this.file + mod.id = this.file + mod.paths = Module._nodeModulePaths(path.dirname(this.file)) + return mod +} + +PromZard.prototype.wrap = function (body) { + var s = '(function( %s ) { %s\n })' + var args = Object.keys(this.ctx).join(', ') + return util.format(s, args, body) +} + +PromZard.prototype.makePrompt = function () { + this.prompts = [] + return prompt.bind(this) + function prompt () { + var p, d, t + for (var i = 0; i < arguments.length; i++) { + var a = arguments[i] + if (typeof a === 'string' && p) + d = a + else if (typeof a === 'string') + p = a + else if (typeof a === 'function') + t = a + else if (a && typeof a === 'object') { + p = a.prompt || p + d = a.default || d + t = a.transform || t + } + } + + try { return this.unique + '-' + this.prompts.length } + finally { this.prompts.push([p, d, t]) } + } +} + +PromZard.prototype.walk = function (o, cb) { + o = o || this.result + cb = cb || function (er, res) { + if (er) + return this.emit('error', this.error = er) + this.result = res + return this.emit('data', res) + } + cb = cb.bind(this) + var keys = Object.keys(o) + var i = 0 + var len = keys.length + + L.call(this) + function L () { + if (this.error) + return + while (i < len) { + var k = keys[i] + var v = o[k] + i++ + + if (v && typeof v === 'object') { + return this.walk(v, function (er, res) { + if (er) return cb(er) + o[k] = res + L.call(this) + }.bind(this)) + } else if (v && + typeof v === 'string' && + v.indexOf(this.unique) === 0) { + var n = +v.substr(this.unique.length + 1) + var prompt = this.prompts[n] + if (isNaN(n) || !prompt) + continue + + // default to the key + if (undefined === prompt[0]) + prompt[0] = k + + // default to the ctx value, if there is one + if (undefined === prompt[1]) + prompt[1] = this.ctx[k] + + return this.prompt(prompt, function (er, res) { + if (er) { + if (!er.notValid) { + return this.emit('error', this.error = er); + } + console.log(er.message) + i -- + return L.call(this) + } + o[k] = res + L.call(this) + }.bind(this)) + } else if (typeof v === 'function') { + try { return v.call(this.ctx, function (er, res) { + if (er) + return this.emit('error', this.error = er) + o[k] = res + // back up so that we process this one again. + // this is because it might return a prompt() call in the cb. 
+ i -- + L.call(this) + }.bind(this)) } + catch (er) { this.emit('error', er) } + } + } + // made it to the end of the loop, maybe + if (i >= len) + return cb(null, o) + } +} + +PromZard.prototype.prompt = function (pdt, cb) { + var prompt = pdt[0] + var def = pdt[1] + var tx = pdt[2] + + if (tx) { + cb = function (cb) { return function (er, data) { + try { + var res = tx(data) + if (!er && res instanceof Error && !!res.notValid) { + return cb(res, null) + } + return cb(er, res) + } + catch (er) { this.emit('error', er) } + }}(cb).bind(this) + } + + read({ prompt: prompt + ':' , default: def }, cb) +} + diff --git a/deps/npm/node_modules/promzard/test/basic.js b/deps/npm/node_modules/promzard/test/basic.js new file mode 100644 index 00000000000000..ad1c92df9c4c05 --- /dev/null +++ b/deps/npm/node_modules/promzard/test/basic.js @@ -0,0 +1,91 @@ +var tap = require('tap') +var pz = require('../promzard.js') +var spawn = require('child_process').spawn + +tap.test('run the example', function (t) { + + var example = require.resolve('../example/index.js') + var node = process.execPath + + var expect = { + "name": "example", + "version": "0.0.0", + "description": "testing description", + "main": "test-entry.js", + "directories": { + "example": "example", + "test": "test" + }, + "dependencies": {}, + "devDependencies": { + "tap": "~0.2.5" + }, + "scripts": { + "test": "tap test/*.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/substack/example.git" + }, + "homepage": "https://github.com/substack/example", + "keywords": [ + "fugazi", + "function", + "waiting", + "room" + ], + "author": { + "name": "James Halliday", + "email": "mail@substack.net", + "url": "http://substack.net" + }, + "license": "MIT", + "engine": { + "node": ">=0.6" + } + } + + console.error('%s %s', node, example) + var c = spawn(node, [example], { customFds: [-1,-1,-1] }) + var output = '' + c.stdout.on('data', function (d) { + output += d + respond() + }) + + var actual = '' + c.stderr.on('data', function (d) { + actual += d + }) + + function respond () { + console.error('respond', output) + if (output.match(/description: $/)) { + c.stdin.write('testing description\n') + return + } + if (output.match(/entry point: \(index\.js\) $/)) { + c.stdin.write('test-entry.js\n') + return + } + if (output.match(/keywords: $/)) { + c.stdin.write('fugazi function waiting room\n') + // "read" module is weird on node >= 0.10 when not a TTY + // requires explicit ending for reasons. + // could dig in, but really just wanna make tests pass, whatever. 
+ c.stdin.end() + return + } + } + + c.on('exit', function () { + console.error('exit event') + }) + + c.on('close', function () { + console.error('actual', actual) + actual = JSON.parse(actual) + t.deepEqual(actual, expect) + t.end() + }) +}) diff --git a/deps/npm/node_modules/promzard/test/buffer.js b/deps/npm/node_modules/promzard/test/buffer.js new file mode 100644 index 00000000000000..e1d240e2e4f480 --- /dev/null +++ b/deps/npm/node_modules/promzard/test/buffer.js @@ -0,0 +1,84 @@ +var tap = require('tap') +var pz = require('../promzard.js') +var spawn = require('child_process').spawn + +tap.test('run the example using a buffer', function (t) { + + var example = require.resolve('../example/buffer.js') + var node = process.execPath + + var expect = { + "name": "example", + "version": "0.0.0", + "description": "testing description", + "main": "test-entry.js", + "directories": { + "example": "example", + "test": "test" + }, + "dependencies": {}, + "devDependencies": { + "tap": "~0.2.5" + }, + "scripts": { + "test": "tap test/*.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/substack/example.git" + }, + "homepage": "https://github.com/substack/example", + "keywords": [ + "fugazi", + "function", + "waiting", + "room" + ], + "author": { + "name": "James Halliday", + "email": "mail@substack.net", + "url": "http://substack.net" + }, + "license": "MIT", + "engine": { + "node": ">=0.6" + } + } + + var c = spawn(node, [example], { customFds: [-1,-1,-1] }) + var output = '' + c.stdout.on('data', function (d) { + output += d + respond() + }) + + var actual = '' + c.stderr.on('data', function (d) { + actual += d + }) + + function respond () { + if (output.match(/description: $/)) { + c.stdin.write('testing description\n') + return + } + if (output.match(/entry point: \(index\.js\) $/)) { + c.stdin.write('test-entry.js\n') + return + } + if (output.match(/keywords: $/)) { + c.stdin.write('fugazi function waiting room\n') + // "read" module is weird on node >= 0.10 when not a TTY + // requires explicit ending for reasons. + // could dig in, but really just wanna make tests pass, whatever. 
+ c.stdin.end() + return + } + } + + c.on('close', function () { + actual = JSON.parse(actual) + t.deepEqual(actual, expect) + t.end() + }) +}) diff --git a/deps/npm/node_modules/promzard/test/exports.input b/deps/npm/node_modules/promzard/test/exports.input new file mode 100644 index 00000000000000..061cbfe1055aa2 --- /dev/null +++ b/deps/npm/node_modules/promzard/test/exports.input @@ -0,0 +1,5 @@ +exports.a = 1 + 2 +exports.b = prompt('To be or not to be?', '!2b') +exports.c = {} +exports.c.x = prompt() +exports.c.y = tmpdir + "/y/file.txt" diff --git a/deps/npm/node_modules/promzard/test/exports.js b/deps/npm/node_modules/promzard/test/exports.js new file mode 100644 index 00000000000000..c17993a4e9e754 --- /dev/null +++ b/deps/npm/node_modules/promzard/test/exports.js @@ -0,0 +1,48 @@ +var test = require('tap').test; +var promzard = require('../'); + +if (process.argv[2] === 'child') { + return child() +} + +test('exports', function (t) { + t.plan(1); + + var spawn = require('child_process').spawn + var child = spawn(process.execPath, [__filename, 'child']) + + var output = '' + child.stderr.on('data', function (c) { + output += c + }) + + setTimeout(function () { + child.stdin.write('\n'); + }, 100) + setTimeout(function () { + child.stdin.end('55\n'); + }, 200) + + child.on('close', function () { + console.error('output=%j', output) + output = JSON.parse(output) + t.same({ + a : 3, + b : '!2b', + c : { + x : 55, + y : '/tmp/y/file.txt', + } + }, output); + t.end() + }) +}); + +function child () { + var ctx = { tmpdir : '/tmp' } + var file = __dirname + '/exports.input'; + + promzard(file, ctx, function (err, output) { + console.error(JSON.stringify(output)) + }); +} diff --git a/deps/npm/node_modules/promzard/test/fn.input b/deps/npm/node_modules/promzard/test/fn.input new file mode 100644 index 00000000000000..ed6c3f1c80c5b5 --- /dev/null +++ b/deps/npm/node_modules/promzard/test/fn.input @@ -0,0 +1,18 @@ +var fs = require('fs') + +module.exports = { + "a": 1 + 2, + "b": prompt('To be or not to be?', '!2b', function (s) { + return s.toUpperCase() + '...' 
+ }), + "c": { + "x": prompt(function (x) { return x * 100 }), + "y": tmpdir + "/y/file.txt" + }, + a_function: function (cb) { + fs.readFile(__filename, 'utf8', cb) + }, + asyncPrompt: function (cb) { + return cb(null, prompt('a prompt at any other time would smell as sweet')) + } +} diff --git a/deps/npm/node_modules/promzard/test/fn.js b/deps/npm/node_modules/promzard/test/fn.js new file mode 100644 index 00000000000000..899ebedbdd010c --- /dev/null +++ b/deps/npm/node_modules/promzard/test/fn.js @@ -0,0 +1,56 @@ +var test = require('tap').test; +var promzard = require('../'); +var fs = require('fs') +var file = __dirname + '/fn.input'; + +var expect = { + a : 3, + b : '!2B...', + c : { + x : 5500, + y : '/tmp/y/file.txt', + } +} +expect.a_function = fs.readFileSync(file, 'utf8') +expect.asyncPrompt = 'async prompt' + +if (process.argv[2] === 'child') { + return child() +} + +test('prompt callback param', function (t) { + t.plan(1); + + var spawn = require('child_process').spawn + var child = spawn(process.execPath, [__filename, 'child']) + + var output = '' + child.stderr.on('data', function (c) { + output += c + }) + + child.on('close', function () { + console.error('output=%j', output) + output = JSON.parse(output) + t.same(output, expect); + t.end() + }) + + setTimeout(function () { + child.stdin.write('\n') + }, 100) + setTimeout(function () { + child.stdin.write('55\n') + }, 150) + setTimeout(function () { + child.stdin.end('async prompt\n') + }, 200) +}) + +function child () { + var ctx = { tmpdir : '/tmp' } + var file = __dirname + '/fn.input'; + promzard(file, ctx, function (err, output) { + console.error(JSON.stringify(output)) + }) +} diff --git a/deps/npm/node_modules/promzard/test/simple.input b/deps/npm/node_modules/promzard/test/simple.input new file mode 100644 index 00000000000000..e49def6470d599 --- /dev/null +++ b/deps/npm/node_modules/promzard/test/simple.input @@ -0,0 +1,8 @@ +module.exports = { + "a": 1 + 2, + "b": prompt('To be or not to be?', '!2b'), + "c": { + "x": prompt(), + "y": tmpdir + "/y/file.txt" + } +} diff --git a/deps/npm/node_modules/promzard/test/simple.js b/deps/npm/node_modules/promzard/test/simple.js new file mode 100644 index 00000000000000..034a86475afbd5 --- /dev/null +++ b/deps/npm/node_modules/promzard/test/simple.js @@ -0,0 +1,30 @@ +var test = require('tap').test; +var promzard = require('../'); + +test('simple', function (t) { + t.plan(1); + + var ctx = { tmpdir : '/tmp' } + var file = __dirname + '/simple.input'; + promzard(file, ctx, function (err, output) { + t.same( + { + a : 3, + b : '!2b', + c : { + x : 55, + y : '/tmp/y/file.txt', + } + }, + output + ); + }); + + setTimeout(function () { + process.stdin.emit('data', '\n'); + }, 100); + + setTimeout(function () { + process.stdin.emit('data', '55\n'); + }, 200); +}); diff --git a/deps/npm/node_modules/promzard/test/validate.input b/deps/npm/node_modules/promzard/test/validate.input new file mode 100644 index 00000000000000..839c0652294ac0 --- /dev/null +++ b/deps/npm/node_modules/promzard/test/validate.input @@ -0,0 +1,8 @@ +module.exports = { + "name": prompt("name", function (data) { + if (data === 'cool') return data + var er = new Error('not cool') + er.notValid = true + return er + }) +} diff --git a/deps/npm/node_modules/promzard/test/validate.js b/deps/npm/node_modules/promzard/test/validate.js new file mode 100644 index 00000000000000..a120681494e60d --- /dev/null +++ b/deps/npm/node_modules/promzard/test/validate.js @@ -0,0 +1,20 @@ + +var promzard = require('../') +var test 
= require('tap').test + +test('validate', function (t) { + t.plan(2) + var ctx = { tmpdir : '/tmp' } + var file = __dirname + '/validate.input' + promzard(file, ctx, function (er, found) { + t.ok(!er) + var wanted = { name: 'cool' } + t.same(found, wanted) + }) + setTimeout(function () { + process.stdin.emit('data', 'not cool\n') + }, 100) + setTimeout(function () { + process.stdin.emit('data', 'cool\n') + }, 200) +}) diff --git a/deps/npm/node_modules/readable-stream/CONTRIBUTING.md b/deps/npm/node_modules/readable-stream/CONTRIBUTING.md new file mode 100644 index 00000000000000..f478d58dca85b2 --- /dev/null +++ b/deps/npm/node_modules/readable-stream/CONTRIBUTING.md @@ -0,0 +1,38 @@ +# Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +* (a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +* (b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +* (c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +* (d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. + +## Moderation Policy + +The [Node.js Moderation Policy] applies to this WG. + +## Code of Conduct + +The [Node.js Code of Conduct][] applies to this WG. + +[Node.js Code of Conduct]: +https://github.com/nodejs/node/blob/master/CODE_OF_CONDUCT.md +[Node.js Moderation Policy]: +https://github.com/nodejs/TSC/blob/master/Moderation-Policy.md diff --git a/deps/npm/node_modules/readable-stream/GOVERNANCE.md b/deps/npm/node_modules/readable-stream/GOVERNANCE.md new file mode 100644 index 00000000000000..16ffb93f24bece --- /dev/null +++ b/deps/npm/node_modules/readable-stream/GOVERNANCE.md @@ -0,0 +1,136 @@ +### Streams Working Group + +The Node.js Streams is jointly governed by a Working Group +(WG) +that is responsible for high-level guidance of the project. + +The WG has final authority over this project including: + +* Technical direction +* Project governance and process (including this policy) +* Contribution policy +* GitHub repository hosting +* Conduct guidelines +* Maintaining the list of additional Collaborators + +For the current list of WG members, see the project +[README.md](./README.md#current-project-team-members). + +### Collaborators + +The readable-stream GitHub repository is +maintained by the WG and additional Collaborators who are added by the +WG on an ongoing basis. + +Individuals making significant and valuable contributions are made +Collaborators and given commit-access to the project. These +individuals are identified by the WG and their addition as +Collaborators is discussed during the WG meeting. + +_Note:_ If you make a significant contribution and are not considered +for commit-access log an issue or contact a WG member directly and it +will be brought up in the next WG meeting. 
+ +Modifications of the contents of the readable-stream repository are +made on +a collaborative basis. Anybody with a GitHub account may propose a +modification via pull request and it will be considered by the project +Collaborators. All pull requests must be reviewed and accepted by a +Collaborator with sufficient expertise who is able to take full +responsibility for the change. In the case of pull requests proposed +by an existing Collaborator, an additional Collaborator is required +for sign-off. Consensus should be sought if additional Collaborators +participate and there is disagreement around a particular +modification. See _Consensus Seeking Process_ below for further detail +on the consensus model used for governance. + +Collaborators may opt to elevate significant or controversial +modifications, or modifications that have not found consensus to the +WG for discussion by assigning the ***WG-agenda*** tag to a pull +request or issue. The WG should serve as the final arbiter where +required. + +For the current list of Collaborators, see the project +[README.md](./README.md#members). + +### WG Membership + +WG seats are not time-limited. There is no fixed size of the WG. +However, the expected target is between 6 and 12, to ensure adequate +coverage of important areas of expertise, balanced with the ability to +make decisions efficiently. + +There is no specific set of requirements or qualifications for WG +membership beyond these rules. + +The WG may add additional members to the WG by unanimous consensus. + +A WG member may be removed from the WG by voluntary resignation, or by +unanimous consensus of all other WG members. + +Changes to WG membership should be posted in the agenda, and may be +suggested as any other agenda item (see "WG Meetings" below). + +If an addition or removal is proposed during a meeting, and the full +WG is not in attendance to participate, then the addition or removal +is added to the agenda for the subsequent meeting. This is to ensure +that all members are given the opportunity to participate in all +membership decisions. If a WG member is unable to attend a meeting +where a planned membership decision is being made, then their consent +is assumed. + +No more than 1/3 of the WG members may be affiliated with the same +employer. If removal or resignation of a WG member, or a change of +employment by a WG member, creates a situation where more than 1/3 of +the WG membership shares an employer, then the situation must be +immediately remedied by the resignation or removal of one or more WG +members affiliated with the over-represented employer(s). + +### WG Meetings + +The WG meets occasionally on a Google Hangout On Air. A designated moderator +approved by the WG runs the meeting. Each meeting should be +published to YouTube. + +Items are added to the WG agenda that are considered contentious or +are modifications of governance, contribution policy, WG membership, +or release process. + +The intention of the agenda is not to approve or review all patches; +that should happen continuously on GitHub and be handled by the larger +group of Collaborators. + +Any community member or contributor can ask that something be added to +the next meeting's agenda by logging a GitHub Issue. Any Collaborator, +WG member or the moderator can add the item to the agenda by adding +the ***WG-agenda*** tag to the issue. + +Prior to each WG meeting the moderator will share the Agenda with +members of the WG. 
WG members can add any items they like to the +agenda at the beginning of each meeting. The moderator and the WG +cannot veto or remove items. + +The WG may invite persons or representatives from certain projects to +participate in a non-voting capacity. + +The moderator is responsible for summarizing the discussion of each +agenda item and sends it as a pull request after the meeting. + +### Consensus Seeking Process + +The WG follows a +[Consensus +Seeking](http://en.wikipedia.org/wiki/Consensus-seeking_decision-making) +decision-making model. + +When an agenda item has appeared to reach a consensus the moderator +will ask "Does anyone object?" as a final call for dissent from the +consensus. + +If an agenda item cannot reach a consensus a WG member can call for +either a closing vote or a vote to table the issue to the next +meeting. The call for a vote must be seconded by a majority of the WG +or else the discussion will continue. Simple majority wins. + +Note that changes to WG membership require a majority consensus. See +"WG Membership" above. diff --git a/deps/npm/node_modules/readable-stream/errors-browser.js b/deps/npm/node_modules/readable-stream/errors-browser.js new file mode 100644 index 00000000000000..fb8e73e1893b10 --- /dev/null +++ b/deps/npm/node_modules/readable-stream/errors-browser.js @@ -0,0 +1,127 @@ +'use strict'; + +function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } + +var codes = {}; + +function createErrorType(code, message, Base) { + if (!Base) { + Base = Error; + } + + function getMessage(arg1, arg2, arg3) { + if (typeof message === 'string') { + return message; + } else { + return message(arg1, arg2, arg3); + } + } + + var NodeError = + /*#__PURE__*/ + function (_Base) { + _inheritsLoose(NodeError, _Base); + + function NodeError(arg1, arg2, arg3) { + return _Base.call(this, getMessage(arg1, arg2, arg3)) || this; + } + + return NodeError; + }(Base); + + NodeError.prototype.name = Base.name; + NodeError.prototype.code = code; + codes[code] = NodeError; +} // https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js + + +function oneOf(expected, thing) { + if (Array.isArray(expected)) { + var len = expected.length; + expected = expected.map(function (i) { + return String(i); + }); + + if (len > 2) { + return "one of ".concat(thing, " ").concat(expected.slice(0, len - 1).join(', '), ", or ") + expected[len - 1]; + } else if (len === 2) { + return "one of ".concat(thing, " ").concat(expected[0], " or ").concat(expected[1]); + } else { + return "of ".concat(thing, " ").concat(expected[0]); + } + } else { + return "of ".concat(thing, " ").concat(String(expected)); + } +} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith + + +function startsWith(str, search, pos) { + return str.substr(!pos || pos < 0 ? 
0 : +pos, search.length) === search; +} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith + + +function endsWith(str, search, this_len) { + if (this_len === undefined || this_len > str.length) { + this_len = str.length; + } + + return str.substring(this_len - search.length, this_len) === search; +} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes + + +function includes(str, search, start) { + if (typeof start !== 'number') { + start = 0; + } + + if (start + search.length > str.length) { + return false; + } else { + return str.indexOf(search, start) !== -1; + } +} + +createErrorType('ERR_INVALID_OPT_VALUE', function (name, value) { + return 'The value "' + value + '" is invalid for option "' + name + '"'; +}, TypeError); +createErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) { + // determiner: 'must be' or 'must not be' + var determiner; + + if (typeof expected === 'string' && startsWith(expected, 'not ')) { + determiner = 'must not be'; + expected = expected.replace(/^not /, ''); + } else { + determiner = 'must be'; + } + + var msg; + + if (endsWith(name, ' argument')) { + // For cases like 'first argument' + msg = "The ".concat(name, " ").concat(determiner, " ").concat(oneOf(expected, 'type')); + } else { + var type = includes(name, '.') ? 'property' : 'argument'; + msg = "The \"".concat(name, "\" ").concat(type, " ").concat(determiner, " ").concat(oneOf(expected, 'type')); + } + + msg += ". Received type ".concat(typeof actual); + return msg; +}, TypeError); +createErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF'); +createErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) { + return 'The ' + name + ' method is not implemented'; +}); +createErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close'); +createErrorType('ERR_STREAM_DESTROYED', function (name) { + return 'Cannot call ' + name + ' after a stream was destroyed'; +}); +createErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times'); +createErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable'); +createErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end'); +createErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError); +createErrorType('ERR_UNKNOWN_ENCODING', function (arg) { + return 'Unknown encoding: ' + arg; +}, TypeError); +createErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event'); +module.exports.codes = codes; diff --git a/deps/npm/node_modules/readable-stream/errors.js b/deps/npm/node_modules/readable-stream/errors.js new file mode 100644 index 00000000000000..8471526d6e7f75 --- /dev/null +++ b/deps/npm/node_modules/readable-stream/errors.js @@ -0,0 +1,116 @@ +'use strict'; + +const codes = {}; + +function createErrorType(code, message, Base) { + if (!Base) { + Base = Error + } + + function getMessage (arg1, arg2, arg3) { + if (typeof message === 'string') { + return message + } else { + return message(arg1, arg2, arg3) + } + } + + class NodeError extends Base { + constructor (arg1, arg2, arg3) { + super(getMessage(arg1, arg2, arg3)); + } + } + + NodeError.prototype.name = Base.name; + NodeError.prototype.code = code; + + codes[code] = NodeError; +} + +// https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js +function oneOf(expected, thing) { + if (Array.isArray(expected)) { + const len = expected.length; + expected = expected.map((i) => String(i)); + if (len > 2) { + return `one of 
${thing} ${expected.slice(0, len - 1).join(', ')}, or ` + + expected[len - 1]; + } else if (len === 2) { + return `one of ${thing} ${expected[0]} or ${expected[1]}`; + } else { + return `of ${thing} ${expected[0]}`; + } + } else { + return `of ${thing} ${String(expected)}`; + } +} + +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith +function startsWith(str, search, pos) { + return str.substr(!pos || pos < 0 ? 0 : +pos, search.length) === search; +} + +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith +function endsWith(str, search, this_len) { + if (this_len === undefined || this_len > str.length) { + this_len = str.length; + } + return str.substring(this_len - search.length, this_len) === search; +} + +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes +function includes(str, search, start) { + if (typeof start !== 'number') { + start = 0; + } + + if (start + search.length > str.length) { + return false; + } else { + return str.indexOf(search, start) !== -1; + } +} + +createErrorType('ERR_INVALID_OPT_VALUE', function (name, value) { + return 'The value "' + value + '" is invalid for option "' + name + '"' +}, TypeError); +createErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) { + // determiner: 'must be' or 'must not be' + let determiner; + if (typeof expected === 'string' && startsWith(expected, 'not ')) { + determiner = 'must not be'; + expected = expected.replace(/^not /, ''); + } else { + determiner = 'must be'; + } + + let msg; + if (endsWith(name, ' argument')) { + // For cases like 'first argument' + msg = `The ${name} ${determiner} ${oneOf(expected, 'type')}`; + } else { + const type = includes(name, '.') ? 'property' : 'argument'; + msg = `The "${name}" ${type} ${determiner} ${oneOf(expected, 'type')}`; + } + + msg += `. Received type ${typeof actual}`; + return msg; +}, TypeError); +createErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF'); +createErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) { + return 'The ' + name + ' method is not implemented' +}); +createErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close'); +createErrorType('ERR_STREAM_DESTROYED', function (name) { + return 'Cannot call ' + name + ' after a stream was destroyed'; +}); +createErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times'); +createErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable'); +createErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end'); +createErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError); +createErrorType('ERR_UNKNOWN_ENCODING', function (arg) { + return 'Unknown encoding: ' + arg +}, TypeError); +createErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event'); + +module.exports.codes = codes; diff --git a/deps/npm/node_modules/readable-stream/experimentalWarning.js b/deps/npm/node_modules/readable-stream/experimentalWarning.js new file mode 100644 index 00000000000000..78e841495bf24d --- /dev/null +++ b/deps/npm/node_modules/readable-stream/experimentalWarning.js @@ -0,0 +1,17 @@ +'use strict' + +var experimentalWarnings = new Set(); + +function emitExperimentalWarning(feature) { + if (experimentalWarnings.has(feature)) return; + var msg = feature + ' is an experimental feature. 
This feature could ' + + 'change at any time'; + experimentalWarnings.add(feature); + process.emitWarning(msg, 'ExperimentalWarning'); +} + +function noop() {} + +module.exports.emitExperimentalWarning = process.emitWarning + ? emitExperimentalWarning + : noop; diff --git a/deps/npm/node_modules/readable-stream/lib/internal/streams/async_iterator.js b/deps/npm/node_modules/readable-stream/lib/internal/streams/async_iterator.js new file mode 100644 index 00000000000000..9fb615a2f3bc44 --- /dev/null +++ b/deps/npm/node_modules/readable-stream/lib/internal/streams/async_iterator.js @@ -0,0 +1,207 @@ +'use strict'; + +var _Object$setPrototypeO; + +function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } + +var finished = require('./end-of-stream'); + +var kLastResolve = Symbol('lastResolve'); +var kLastReject = Symbol('lastReject'); +var kError = Symbol('error'); +var kEnded = Symbol('ended'); +var kLastPromise = Symbol('lastPromise'); +var kHandlePromise = Symbol('handlePromise'); +var kStream = Symbol('stream'); + +function createIterResult(value, done) { + return { + value: value, + done: done + }; +} + +function readAndResolve(iter) { + var resolve = iter[kLastResolve]; + + if (resolve !== null) { + var data = iter[kStream].read(); // we defer if data is null + // we can be expecting either 'end' or + // 'error' + + if (data !== null) { + iter[kLastPromise] = null; + iter[kLastResolve] = null; + iter[kLastReject] = null; + resolve(createIterResult(data, false)); + } + } +} + +function onReadable(iter) { + // we wait for the next tick, because it might + // emit an error with process.nextTick + process.nextTick(readAndResolve, iter); +} + +function wrapForNext(lastPromise, iter) { + return function (resolve, reject) { + lastPromise.then(function () { + if (iter[kEnded]) { + resolve(createIterResult(undefined, true)); + return; + } + + iter[kHandlePromise](resolve, reject); + }, reject); + }; +} + +var AsyncIteratorPrototype = Object.getPrototypeOf(function () {}); +var ReadableStreamAsyncIteratorPrototype = Object.setPrototypeOf((_Object$setPrototypeO = { + get stream() { + return this[kStream]; + }, + + next: function next() { + var _this = this; + + // if we have detected an error in the meanwhile + // reject straight away + var error = this[kError]; + + if (error !== null) { + return Promise.reject(error); + } + + if (this[kEnded]) { + return Promise.resolve(createIterResult(undefined, true)); + } + + if (this[kStream].destroyed) { + // We need to defer via nextTick because if .destroy(err) is + // called, the error will be emitted via nextTick, and + // we cannot guarantee that there is no error lingering around + // waiting to be emitted. 
+ return new Promise(function (resolve, reject) { + process.nextTick(function () { + if (_this[kError]) { + reject(_this[kError]); + } else { + resolve(createIterResult(undefined, true)); + } + }); + }); + } // if we have multiple next() calls + // we will wait for the previous Promise to finish + // this logic is optimized to support for await loops, + // where next() is only called once at a time + + + var lastPromise = this[kLastPromise]; + var promise; + + if (lastPromise) { + promise = new Promise(wrapForNext(lastPromise, this)); + } else { + // fast path needed to support multiple this.push() + // without triggering the next() queue + var data = this[kStream].read(); + + if (data !== null) { + return Promise.resolve(createIterResult(data, false)); + } + + promise = new Promise(this[kHandlePromise]); + } + + this[kLastPromise] = promise; + return promise; + } +}, _defineProperty(_Object$setPrototypeO, Symbol.asyncIterator, function () { + return this; +}), _defineProperty(_Object$setPrototypeO, "return", function _return() { + var _this2 = this; + + // destroy(err, cb) is a private API + // we can guarantee we have that here, because we control the + // Readable class this is attached to + return new Promise(function (resolve, reject) { + _this2[kStream].destroy(null, function (err) { + if (err) { + reject(err); + return; + } + + resolve(createIterResult(undefined, true)); + }); + }); +}), _Object$setPrototypeO), AsyncIteratorPrototype); + +var createReadableStreamAsyncIterator = function createReadableStreamAsyncIterator(stream) { + var _Object$create; + + var iterator = Object.create(ReadableStreamAsyncIteratorPrototype, (_Object$create = {}, _defineProperty(_Object$create, kStream, { + value: stream, + writable: true + }), _defineProperty(_Object$create, kLastResolve, { + value: null, + writable: true + }), _defineProperty(_Object$create, kLastReject, { + value: null, + writable: true + }), _defineProperty(_Object$create, kError, { + value: null, + writable: true + }), _defineProperty(_Object$create, kEnded, { + value: stream._readableState.endEmitted, + writable: true + }), _defineProperty(_Object$create, kHandlePromise, { + value: function value(resolve, reject) { + var data = iterator[kStream].read(); + + if (data) { + iterator[kLastPromise] = null; + iterator[kLastResolve] = null; + iterator[kLastReject] = null; + resolve(createIterResult(data, false)); + } else { + iterator[kLastResolve] = resolve; + iterator[kLastReject] = reject; + } + }, + writable: true + }), _Object$create)); + iterator[kLastPromise] = null; + finished(stream, function (err) { + if (err && err.code !== 'ERR_STREAM_PREMATURE_CLOSE') { + var reject = iterator[kLastReject]; // reject if we are waiting for data in the Promise + // returned by next() and store the error + + if (reject !== null) { + iterator[kLastPromise] = null; + iterator[kLastResolve] = null; + iterator[kLastReject] = null; + reject(err); + } + + iterator[kError] = err; + return; + } + + var resolve = iterator[kLastResolve]; + + if (resolve !== null) { + iterator[kLastPromise] = null; + iterator[kLastResolve] = null; + iterator[kLastReject] = null; + resolve(createIterResult(undefined, true)); + } + + iterator[kEnded] = true; + }); + stream.on('readable', onReadable.bind(null, iterator)); + return iterator; +}; + +module.exports = createReadableStreamAsyncIterator; \ No newline at end of file diff --git a/deps/npm/node_modules/readable-stream/lib/internal/streams/from-browser.js 
b/deps/npm/node_modules/readable-stream/lib/internal/streams/from-browser.js new file mode 100644 index 00000000000000..a4ce56f3c90f60 --- /dev/null +++ b/deps/npm/node_modules/readable-stream/lib/internal/streams/from-browser.js @@ -0,0 +1,3 @@ +module.exports = function () { + throw new Error('Readable.from is not available in the browser') +}; diff --git a/deps/npm/node_modules/readable-stream/lib/internal/streams/stream-browser.js b/deps/npm/node_modules/readable-stream/lib/internal/streams/stream-browser.js new file mode 100644 index 00000000000000..9332a3fdae7060 --- /dev/null +++ b/deps/npm/node_modules/readable-stream/lib/internal/streams/stream-browser.js @@ -0,0 +1 @@ +module.exports = require('events').EventEmitter; diff --git a/deps/npm/node_modules/readable-stream/lib/internal/streams/stream.js b/deps/npm/node_modules/readable-stream/lib/internal/streams/stream.js new file mode 100644 index 00000000000000..ce2ad5b6ee57f4 --- /dev/null +++ b/deps/npm/node_modules/readable-stream/lib/internal/streams/stream.js @@ -0,0 +1 @@ +module.exports = require('stream'); diff --git a/deps/npm/node_modules/readable-stream/readable-browser.js b/deps/npm/node_modules/readable-stream/readable-browser.js new file mode 100644 index 00000000000000..adbf60de832f9d --- /dev/null +++ b/deps/npm/node_modules/readable-stream/readable-browser.js @@ -0,0 +1,9 @@ +exports = module.exports = require('./lib/_stream_readable.js'); +exports.Stream = exports; +exports.Readable = exports; +exports.Writable = require('./lib/_stream_writable.js'); +exports.Duplex = require('./lib/_stream_duplex.js'); +exports.Transform = require('./lib/_stream_transform.js'); +exports.PassThrough = require('./lib/_stream_passthrough.js'); +exports.finished = require('./lib/internal/streams/end-of-stream.js'); +exports.pipeline = require('./lib/internal/streams/pipeline.js'); diff --git a/deps/npm/node_modules/readable-stream/readable.js b/deps/npm/node_modules/readable-stream/readable.js new file mode 100644 index 00000000000000..9e0ca120ded827 --- /dev/null +++ b/deps/npm/node_modules/readable-stream/readable.js @@ -0,0 +1,16 @@ +var Stream = require('stream'); +if (process.env.READABLE_STREAM === 'disable' && Stream) { + module.exports = Stream.Readable; + Object.assign(module.exports, Stream); + module.exports.Stream = Stream; +} else { + exports = module.exports = require('./lib/_stream_readable.js'); + exports.Stream = Stream || exports; + exports.Readable = exports; + exports.Writable = require('./lib/_stream_writable.js'); + exports.Duplex = require('./lib/_stream_duplex.js'); + exports.Transform = require('./lib/_stream_transform.js'); + exports.PassThrough = require('./lib/_stream_passthrough.js'); + exports.finished = require('./lib/internal/streams/end-of-stream.js'); + exports.pipeline = require('./lib/internal/streams/pipeline.js'); +} diff --git a/deps/v8/.vpython b/deps/v8/.vpython new file mode 100644 index 00000000000000..d4a07677ca9a22 --- /dev/null +++ b/deps/v8/.vpython @@ -0,0 +1,91 @@ +# This is a vpython "spec" file. +# +# It describes patterns for python wheel dependencies of the python scripts in +# the V8 repo, particularly for dependencies that have compiled components +# (since pure-python dependencies can be easily vendored into third_party). +# +# When vpython is invoked, it finds this file and builds a python VirtualEnv, +# containing all of the dependencies described in this file, fetching them from +# CIPD (the "Chrome Infrastructure Package Deployer" service). 
Unlike `pip`, +# this never requires the end-user machine to have a working python extension +# compilation environment. All of these packages are built using: +# https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/ +# +# All python scripts in the repo share this same spec, to avoid dependency +# fragmentation. +# +# If you have depot_tools installed in your $PATH, you can invoke python scripts +# in this repo by running them as you normally would run them, except +# substituting `vpython` instead of `python` on the command line, e.g.: +# vpython path/to/script.py some --arguments +# +# Read more about `vpython` and how to modify this file here: +# https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md + +python_version: "2.7" + +# The default set of platforms vpython checks does not yet include mac-arm64. +# Setting `verify_pep425_tag` to the list of platforms we explicitly must support +# allows us to ensure that vpython specs stay mac-arm64-friendly +verify_pep425_tag: [ + {python: "cp27", abi: "cp27mu", platform: "manylinux1_x86_64"}, + {python: "cp27", abi: "cp27mu", platform: "linux_arm64"}, + {python: "cp27", abi: "cp27mu", platform: "linux_armv6l"}, + + {python: "cp27", abi: "cp27m", platform: "macosx_10_10_intel"}, + {python: "cp27", abi: "cp27m", platform: "macosx_11_0_arm64"}, + + {python: "cp27", abi: "cp27m", platform: "win32"}, + {python: "cp27", abi: "cp27m", platform: "win_amd64"} +] + +# Needed by third_party/catapult/devil/devil, which is imported by +# build/android/test_runner.py when running performance tests. +wheel: < + name: "infra/python/wheels/psutil/${vpython_platform}" + version: "version:5.2.2" +> + +# Used by: +# build/toolchain/win +wheel: < + name: "infra/python/wheels/pypiwin32/${vpython_platform}" + version: "version:219" + match_tag: < + platform: "win32" + > + match_tag: < + platform: "win_amd64" + > +> + +# Used by: +# tools/unittests/run_perf_test.py +wheel: < + name: "infra/python/wheels/coverage/${vpython_platform}" + version: "version:4.3.4" +> +wheel: < + name: "infra/python/wheels/six-py2_py3" + version: "version:1.10.0" +> +wheel: < + name: "infra/python/wheels/pbr-py2_py3" + version: "version:3.0.0" +> +wheel: < + name: "infra/python/wheels/funcsigs-py2_py3" + version: "version:1.0.2" +> +wheel: < + name: "infra/python/wheels/mock-py2_py3" + version: "version:2.0.0" +> + +# Used by: +# tools/run_perf.py +# tools/unittests/run_perf_test.py +wheel: < + name: "infra/python/wheels/numpy/${vpython_platform}" + version: "version:1.11.3" +> diff --git a/deps/v8/bazel/BUILD.zlib b/deps/v8/bazel/BUILD.zlib new file mode 100644 index 00000000000000..25a2c35313b981 --- /dev/null +++ b/deps/v8/bazel/BUILD.zlib @@ -0,0 +1,69 @@ +# Copyright 2021 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +cc_library( + name = "zlib", + srcs = [ + "adler32.c", + "chromeconf.h", + "compress.c", + "contrib/optimizations/insert_string.h", + "contrib/optimizations/slide_hash_neon.h", + "cpu_features.c", + "cpu_features.h", + "crc32.c", + "crc32.h", + "deflate.c", + "deflate.h", + "gzclose.c", + "gzguts.h", + "gzlib.c", + "gzread.c", + "gzwrite.c", + "infback.c", + "inffast.c", + "inffast.h", + "inffixed.h", + "inflate.c", + "inflate.h", + "inftrees.c", + "inftrees.h", + "trees.c", + "trees.h", + "uncompr.c", + "zconf.h", + "zlib.h", + "zutil.c", + "zutil.h", + ], + hdrs = [ + "zlib.h", + ], + defines = [ + "CHROMIUM_ZLIB_NO_CHROMECONF", + "CPU_NO_SIMD", + ] + select({ + "@platforms//os:windows": [], + "//conditions:default": [ + "HAVE_HIDDEN", + ], + }), + include_prefix = "third_party/zlib", + visibility = ["//visibility:public"], +) + +cc_library( + name = "zlib_compression_utils", + srcs = [ + "google/compression_utils_portable.cc", + ], + hdrs = [ + "google/compression_utils_portable.h", + ], + include_prefix = "third_party/zlib", + visibility = ["//visibility:public"], + deps = [ + "//external:zlib", + ], +) diff --git a/deps/v8/gni/v8.cmx b/deps/v8/gni/v8.cmx new file mode 100644 index 00000000000000..45fd74a09faffa --- /dev/null +++ b/deps/v8/gni/v8.cmx @@ -0,0 +1,52 @@ +{ + "facets": { + "fuchsia.test": { + "system-services": [ + "fuchsia.kernel.VmexResource" + ] + } + }, + "sandbox": { + "dev": [ + "null", + "zero" + ], + "features": [ + "deprecated-ambient-replace-as-executable", + "isolated-cache-storage", + "isolated-persistent-storage", + "isolated-temp", + "root-ssl-certificates", + "vulkan" + ], + "services": [ + "fuchsia.accessibility.semantics.SemanticsManager", + "fuchsia.camera3.DeviceWatcher", + "fuchsia.device.NameProvider", + "fuchsia.fonts.Provider", + "fuchsia.intl.PropertyProvider", + "fuchsia.kernel.VmexResource", + "fuchsia.logger.Log", + "fuchsia.logger.LogSink", + "fuchsia.media.Audio", + "fuchsia.media.SessionAudioConsumerFactory", + "fuchsia.media.drm.Widevine", + "fuchsia.mediacodec.CodecFactory", + "fuchsia.memorypressure.Provider", + "fuchsia.net.NameLookup", + "fuchsia.net.interfaces.State", + "fuchsia.posix.socket.Provider", + "fuchsia.process.Launcher", + "fuchsia.sys.Environment", + "fuchsia.sys.Launcher", + "fuchsia.sys.Loader", + "fuchsia.sysmem.Allocator", + "fuchsia.ui.input.ImeService", + "fuchsia.ui.input.ImeVisibilityService", + "fuchsia.ui.scenic.Scenic", + "fuchsia.ui.policy.Presenter", + "fuchsia.vulkan.loader.Loader", + "fuchsia.web.ContextProvider" + ] + } +} diff --git a/deps/v8/src/base/atomicops_internals_atomicword_compat.h b/deps/v8/src/base/atomicops_internals_atomicword_compat.h new file mode 100644 index 00000000000000..5ed7d5594e527f --- /dev/null +++ b/deps/v8/src/base/atomicops_internals_atomicword_compat.h @@ -0,0 +1,89 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is an internal atomic implementation, use atomicops.h instead. + +#ifndef V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_ +#define V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_ + +// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32, +// which in turn means int. On some LP32 platforms, intptr_t is an int, but +// on others, it's a long. When AtomicWord and Atomic32 are based on different +// fundamental types, their pointers are incompatible. 
+// +// This file defines function overloads to allow both AtomicWord and Atomic32 +// data to be used with this interface. +// +// On LP64 platforms, AtomicWord and Atomic64 are both always long, +// so this problem doesn't occur. + +#if !defined(V8_HOST_ARCH_64_BIT) + +namespace v8 { +namespace base { + +inline AtomicWord Relaxed_CompareAndSwap(volatile AtomicWord* ptr, + AtomicWord old_value, + AtomicWord new_value) { + return Relaxed_CompareAndSwap(reinterpret_cast(ptr), + old_value, new_value); +} + +inline AtomicWord Relaxed_AtomicExchange(volatile AtomicWord* ptr, + AtomicWord new_value) { + return Relaxed_AtomicExchange(reinterpret_cast(ptr), + new_value); +} + +inline AtomicWord Relaxed_AtomicIncrement(volatile AtomicWord* ptr, + AtomicWord increment) { + return Relaxed_AtomicIncrement(reinterpret_cast(ptr), + increment); +} + +inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, + AtomicWord old_value, + AtomicWord new_value) { + return v8::base::Acquire_CompareAndSwap( + reinterpret_cast(ptr), old_value, new_value); +} + +inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, + AtomicWord old_value, + AtomicWord new_value) { + return v8::base::Release_CompareAndSwap( + reinterpret_cast(ptr), old_value, new_value); +} + +inline AtomicWord AcquireRelease_CompareAndSwap(volatile AtomicWord* ptr, + AtomicWord old_value, + AtomicWord new_value) { + return v8::base::AcquireRelease_CompareAndSwap( + reinterpret_cast(ptr), old_value, new_value); +} + +inline void Relaxed_Store(volatile AtomicWord* ptr, AtomicWord value) { + Relaxed_Store(reinterpret_cast(ptr), value); +} + +inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { + return v8::base::Release_Store( + reinterpret_cast(ptr), value); +} + +inline AtomicWord Relaxed_Load(volatile const AtomicWord* ptr) { + return Relaxed_Load(reinterpret_cast(ptr)); +} + +inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { + return v8::base::Acquire_Load( + reinterpret_cast(ptr)); +} + +} // namespace base +} // namespace v8 + +#endif // !defined(V8_HOST_ARCH_64_BIT) + +#endif // V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_ diff --git a/deps/v8/src/base/functional.cc b/deps/v8/src/base/functional.cc new file mode 100644 index 00000000000000..dffb91f3cc1af4 --- /dev/null +++ b/deps/v8/src/base/functional.cc @@ -0,0 +1,110 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// This also contains public domain code from MurmurHash. From the +// MurmurHash header: +// +// MurmurHash3 was written by Austin Appleby, and is placed in the public +// domain. The author hereby disclaims copyright to this source code. + +#include "src/base/functional.h" + +#include + +#include "src/base/bits.h" + +namespace v8 { +namespace base { + +namespace { + +// Thomas Wang, Integer Hash Functions. 
+// https://gist.github.com/badboy/6267743 +template +V8_INLINE size_t hash_value_unsigned(T v) { + switch (sizeof(T)) { + case 4: { + // "32 bit Mix Functions" + v = ~v + (v << 15); // v = (v << 15) - v - 1; + v = v ^ (v >> 12); + v = v + (v << 2); + v = v ^ (v >> 4); + v = v * 2057; // v = (v + (v << 3)) + (v << 11); + v = v ^ (v >> 16); + return static_cast(v); + } + case 8: { + switch (sizeof(size_t)) { + case 4: { + // "64 bit to 32 bit Hash Functions" + v = ~v + (v << 18); // v = (v << 18) - v - 1; + v = v ^ (v >> 31); + v = v * 21; // v = (v + (v << 2)) + (v << 4); + v = v ^ (v >> 11); + v = v + (v << 6); + v = v ^ (v >> 22); + return static_cast(v); + } + case 8: { + // "64 bit Mix Functions" + v = ~v + (v << 21); // v = (v << 21) - v - 1; + v = v ^ (v >> 24); + v = (v + (v << 3)) + (v << 8); // v * 265 + v = v ^ (v >> 14); + v = (v + (v << 2)) + (v << 4); // v * 21 + v = v ^ (v >> 28); + v = v + (v << 31); + return static_cast(v); + } + } + } + } + UNREACHABLE(); +} + +} // namespace + + +// This code was taken from MurmurHash. +size_t hash_combine(size_t seed, size_t value) { +#if V8_HOST_ARCH_32_BIT + const uint32_t c1 = 0xCC9E2D51; + const uint32_t c2 = 0x1B873593; + + value *= c1; + value = bits::RotateRight32(value, 15); + value *= c2; + + seed ^= value; + seed = bits::RotateRight32(seed, 13); + seed = seed * 5 + 0xE6546B64; +#else + const uint64_t m = uint64_t{0xC6A4A7935BD1E995}; + const uint32_t r = 47; + + value *= m; + value ^= value >> r; + value *= m; + + seed ^= value; + seed *= m; +#endif // V8_HOST_ARCH_32_BIT + return seed; +} + + +size_t hash_value(unsigned int v) { return hash_value_unsigned(v); } + + +size_t hash_value(unsigned long v) { // NOLINT(runtime/int) + return hash_value_unsigned(v); +} + + +size_t hash_value(unsigned long long v) { // NOLINT(runtime/int) + return hash_value_unsigned(v); +} + +} // namespace base +} // namespace v8 diff --git a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h new file mode 100644 index 00000000000000..237f706ea6ac9e --- /dev/null +++ b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h @@ -0,0 +1,526 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASELINE_MIPS_BASELINE_ASSEMBLER_MIPS_INL_H_ +#define V8_BASELINE_MIPS_BASELINE_ASSEMBLER_MIPS_INL_H_ + +#include "src/baseline/baseline-assembler.h" +#include "src/codegen/interface-descriptors.h" +#include "src/codegen/mips/assembler-mips-inl.h" + +namespace v8 { +namespace internal { +namespace baseline { + +class BaselineAssembler::ScratchRegisterScope { + public: + explicit ScratchRegisterScope(BaselineAssembler* assembler) + : assembler_(assembler), + prev_scope_(assembler->scratch_register_scope_), + wrapped_scope_(assembler->masm()) { + if (!assembler_->scratch_register_scope_) { + // If we haven't opened a scratch scope yet, for the first one add a + // couple of extra registers. 
+ wrapped_scope_.Include({t4, t5, t6, t7}); + } + assembler_->scratch_register_scope_ = this; + } + ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; } + + Register AcquireScratch() { return wrapped_scope_.Acquire(); } + + private: + BaselineAssembler* assembler_; + ScratchRegisterScope* prev_scope_; + UseScratchRegisterScope wrapped_scope_; +}; + +enum class Condition : uint32_t { + kEqual = eq, + kNotEqual = ne, + + kLessThan = lt, + kGreaterThan = gt, + kLessThanEqual = le, + kGreaterThanEqual = ge, + + kUnsignedLessThan = Uless, + kUnsignedGreaterThan = Ugreater, + kUnsignedLessThanEqual = Uless_equal, + kUnsignedGreaterThanEqual = Ugreater_equal, + + kOverflow = overflow, + kNoOverflow = no_overflow, + + kZero = eq, + kNotZero = ne, +}; + +inline internal::Condition AsMasmCondition(Condition cond) { + // This is important for arm, where the internal::Condition where each value + // represents an encoded bit field value. + STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition)); + return static_cast(cond); +} + +namespace detail { + +#ifdef DEBUG +inline bool Clobbers(Register target, MemOperand op) { + return op.is_reg() && op.rm() == target; +} +#endif + +} // namespace detail + +#define __ masm_-> + +MemOperand BaselineAssembler::RegisterFrameOperand( + interpreter::Register interpreter_register) { + return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); +} +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + return __ Addu(rscratch, fp, + interpreter_register.ToOperand() * kSystemPointerSize); +} +MemOperand BaselineAssembler::FeedbackVectorOperand() { + return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); +} + +void BaselineAssembler::Bind(Label* label) { __ bind(label); } + +void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); } + +void BaselineAssembler::JumpTarget() { + // NOP. 
+} +void BaselineAssembler::Jump(Label* target, Label::Distance distance) { + __ Branch(target); +} +void BaselineAssembler::JumpIfRoot(Register value, RootIndex index, + Label* target, Label::Distance) { + __ JumpIfRoot(value, index, target); +} +void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index, + Label* target, Label::Distance) { + __ JumpIfNotRoot(value, index, target); +} +void BaselineAssembler::JumpIfSmi(Register value, Label* target, + Label::Distance) { + __ JumpIfSmi(value, target); +} +void BaselineAssembler::JumpIfNotSmi(Register value, Label* target, + Label::Distance) { + __ JumpIfNotSmi(value, target); +} +void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right, + Label* target, + Label::Distance distance) { + JumpIf(cc, left, Operand(right), target, distance); +} + +void BaselineAssembler::CallBuiltin(Builtin builtin) { + ASM_CODE_COMMENT_STRING(masm_, + __ CommentForOffHeapTrampoline("call", builtin)); + Register temp = t9; + __ LoadEntryFromBuiltin(builtin, temp); + __ Call(temp); +} + +void BaselineAssembler::TailCallBuiltin(Builtin builtin) { + ASM_CODE_COMMENT_STRING(masm_, + __ CommentForOffHeapTrampoline("tail call", builtin)); + Register temp = t9; + __ LoadEntryFromBuiltin(builtin, temp); + __ Jump(temp); +} + +void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc, + Label* target, Label::Distance) { + ScratchRegisterScope temps(this); + Register scratch = temps.AcquireScratch(); + __ And(scratch, value, Operand(mask)); + __ Branch(target, AsMasmCondition(cc), scratch, Operand(zero_reg)); +} + +void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs, + Label* target, Label::Distance) { + __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs)); +} +void BaselineAssembler::JumpIfObjectType(Condition cc, Register object, + InstanceType instance_type, + Register map, Label* target, + Label::Distance) { + ScratchRegisterScope temps(this); + Register type = temps.AcquireScratch(); + __ GetObjectType(object, map, type); + __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type)); +} +void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, + InstanceType instance_type, + Label* target, Label::Distance) { + ScratchRegisterScope temps(this); + Register type = temps.AcquireScratch(); + if (FLAG_debug_code) { + __ AssertNotSmi(map); + __ GetObjectType(map, type, type); + __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE)); + } + __ Lw(type, FieldMemOperand(map, Map::kInstanceTypeOffset)); + __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type)); +} +void BaselineAssembler::JumpIfPointer(Condition cc, Register value, + MemOperand operand, Label* target, + Label::Distance) { + ScratchRegisterScope temps(this); + Register scratch = temps.AcquireScratch(); + __ Lw(scratch, operand); + __ Branch(target, AsMasmCondition(cc), value, Operand(scratch)); +} +void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi, + Label* target, Label::Distance) { + ScratchRegisterScope temps(this); + Register scratch = temps.AcquireScratch(); + __ li(scratch, Operand(smi)); + __ SmiUntag(scratch); + __ Branch(target, AsMasmCondition(cc), value, Operand(scratch)); +} +void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs, + Label* target, Label::Distance) { + __ AssertSmi(lhs); + __ AssertSmi(rhs); + __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs)); +} +void BaselineAssembler::JumpIfTagged(Condition cc, 
Register value, + MemOperand operand, Label* target, + Label::Distance) { + ScratchRegisterScope temps(this); + Register scratch = temps.AcquireScratch(); + __ Lw(scratch, operand); + __ Branch(target, AsMasmCondition(cc), value, Operand(scratch)); +} +void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand, + Register value, Label* target, + Label::Distance) { + ScratchRegisterScope temps(this); + Register scratch = temps.AcquireScratch(); + __ Lw(scratch, operand); + __ Branch(target, AsMasmCondition(cc), scratch, Operand(value)); +} +void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte, + Label* target, Label::Distance) { + __ Branch(target, AsMasmCondition(cc), value, Operand(byte)); +} + +void BaselineAssembler::Move(interpreter::Register output, Register source) { + Move(RegisterFrameOperand(output), source); +} +void BaselineAssembler::Move(Register output, TaggedIndex value) { + __ li(output, Operand(value.ptr())); +} +void BaselineAssembler::Move(MemOperand output, Register source) { + __ Sw(source, output); +} +void BaselineAssembler::Move(Register output, ExternalReference reference) { + __ li(output, Operand(reference)); +} +void BaselineAssembler::Move(Register output, Handle value) { + __ li(output, Operand(value)); +} +void BaselineAssembler::Move(Register output, int32_t value) { + __ li(output, Operand(value)); +} +void BaselineAssembler::MoveMaybeSmi(Register output, Register source) { + __ Move(output, source); +} +void BaselineAssembler::MoveSmi(Register output, Register source) { + __ Move(output, source); +} + +namespace detail { + +template +inline Register ToRegister(BaselineAssembler* basm, + BaselineAssembler::ScratchRegisterScope* scope, + Arg arg) { + Register reg = scope->AcquireScratch(); + basm->Move(reg, arg); + return reg; +} +inline Register ToRegister(BaselineAssembler* basm, + BaselineAssembler::ScratchRegisterScope* scope, + Register reg) { + return reg; +} + +template +struct PushAllHelper; +template <> +struct PushAllHelper<> { + static int Push(BaselineAssembler* basm) { return 0; } + static int PushReverse(BaselineAssembler* basm) { return 0; } +}; +// TODO(ishell): try to pack sequence of pushes into one instruction by +// looking at regiser codes. For example, Push(r1, r2, r5, r0, r3, r4) +// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4). +template +struct PushAllHelper { + static int Push(BaselineAssembler* basm, Arg arg) { + BaselineAssembler::ScratchRegisterScope scope(basm); + basm->masm()->Push(ToRegister(basm, &scope, arg)); + return 1; + } + static int PushReverse(BaselineAssembler* basm, Arg arg) { + return Push(basm, arg); + } +}; +// TODO(ishell): try to pack sequence of pushes into one instruction by +// looking at regiser codes. For example, Push(r1, r2, r5, r0, r3, r4) +// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4). +template +struct PushAllHelper { + static int Push(BaselineAssembler* basm, Arg arg, Args... args) { + PushAllHelper::Push(basm, arg); + return 1 + PushAllHelper::Push(basm, args...); + } + static int PushReverse(BaselineAssembler* basm, Arg arg, Args... 
args) { + int nargs = PushAllHelper::PushReverse(basm, args...); + PushAllHelper::Push(basm, arg); + return nargs + 1; + } +}; +template <> +struct PushAllHelper { + static int Push(BaselineAssembler* basm, interpreter::RegisterList list) { + for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) { + PushAllHelper::Push(basm, list[reg_index]); + } + return list.register_count(); + } + static int PushReverse(BaselineAssembler* basm, + interpreter::RegisterList list) { + for (int reg_index = list.register_count() - 1; reg_index >= 0; + --reg_index) { + PushAllHelper::Push(basm, list[reg_index]); + } + return list.register_count(); + } +}; + +template +struct PopAllHelper; +template <> +struct PopAllHelper<> { + static void Pop(BaselineAssembler* basm) {} +}; +// TODO(ishell): try to pack sequence of pops into one instruction by +// looking at regiser codes. For example, Pop(r1, r2, r5, r0, r3, r4) +// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4). +template <> +struct PopAllHelper { + static void Pop(BaselineAssembler* basm, Register reg) { + basm->masm()->Pop(reg); + } +}; +template +struct PopAllHelper { + static void Pop(BaselineAssembler* basm, Register reg, T... tail) { + PopAllHelper::Pop(basm, reg); + PopAllHelper::Pop(basm, tail...); + } +}; + +} // namespace detail + +template +int BaselineAssembler::Push(T... vals) { + return detail::PushAllHelper::Push(this, vals...); +} + +template +void BaselineAssembler::PushReverse(T... vals) { + detail::PushAllHelper::PushReverse(this, vals...); +} + +template +void BaselineAssembler::Pop(T... registers) { + detail::PopAllHelper::Pop(this, registers...); +} + +void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, + int offset) { + __ Lw(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, + int offset) { + __ Lw(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, + int offset) { + __ Lw(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, + Register source, int offset) { + __ lhu(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::LoadWord8Field(Register output, Register source, + int offset) { + __ lb(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::StoreTaggedSignedField(Register target, int offset, + Smi value) { + ASM_CODE_COMMENT(masm_); + ScratchRegisterScope temps(this); + Register scratch = temps.AcquireScratch(); + __ li(scratch, Operand(value)); + __ Sw(scratch, FieldMemOperand(target, offset)); +} +void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target, + int offset, + Register value) { + ASM_CODE_COMMENT(masm_); + __ Sw(value, FieldMemOperand(target, offset)); + ScratchRegisterScope temps(this); + Register scratch = temps.AcquireScratch(); + __ RecordWriteField(target, offset, value, scratch, kRAHasNotBeenSaved, + SaveFPRegsMode::kIgnore); +} +void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target, + int offset, + Register value) { + __ Sw(value, FieldMemOperand(target, offset)); +} + +void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( + int32_t weight, Label* skip_interrupt_label) { + ASM_CODE_COMMENT(masm_); + ScratchRegisterScope scratch_scope(this); + Register feedback_cell = scratch_scope.AcquireScratch(); + LoadFunction(feedback_cell); + 
LoadTaggedPointerField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); + + Register interrupt_budget = scratch_scope.AcquireScratch(); + __ Lw(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + __ Addu(interrupt_budget, interrupt_budget, weight); + __ Sw(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + if (skip_interrupt_label) { + DCHECK_LT(weight, 0); + __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg)); + } +} +void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( + Register weight, Label* skip_interrupt_label) { + ASM_CODE_COMMENT(masm_); + ScratchRegisterScope scratch_scope(this); + Register feedback_cell = scratch_scope.AcquireScratch(); + LoadFunction(feedback_cell); + LoadTaggedPointerField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); + + Register interrupt_budget = scratch_scope.AcquireScratch(); + __ Lw(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + __ Addu(interrupt_budget, interrupt_budget, weight); + __ Sw(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + if (skip_interrupt_label) + __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg)); +} + +void BaselineAssembler::AddSmi(Register lhs, Smi rhs) { + __ Addu(lhs, lhs, Operand(rhs)); +} + +void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) { + __ And(output, lhs, Operand(rhs)); +} + +void BaselineAssembler::Switch(Register reg, int case_value_base, + Label** labels, int num_labels) { + ASM_CODE_COMMENT(masm_); + Label fallthrough; + if (case_value_base != 0) { + __ Subu(reg, reg, Operand(case_value_base)); + } + + __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual), + reg, Operand(num_labels)); + + __ GenerateSwitchTable(reg, num_labels, + [labels](size_t i) { return labels[i]; }); + + __ bind(&fallthrough); +} + +#undef __ + +#define __ basm. + +void BaselineAssembler::EmitReturn(MacroAssembler* masm) { + ASM_CODE_COMMENT(masm); + BaselineAssembler basm(masm); + + Register weight = BaselineLeaveFrameDescriptor::WeightRegister(); + Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister(); + + { + ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget"); + + Label skip_interrupt_label; + __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label); + __ masm()->SmiTag(params_size); + __ masm()->Push(params_size, kInterpreterAccumulatorRegister); + + __ LoadContext(kContextRegister); + __ LoadFunction(kJSFunctionRegister); + __ masm()->Push(kJSFunctionRegister); + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1); + + __ masm()->Pop(params_size, kInterpreterAccumulatorRegister); + __ masm()->SmiUntag(params_size); + + __ Bind(&skip_interrupt_label); + } + + BaselineAssembler::ScratchRegisterScope temps(&basm); + Register actual_params_size = temps.AcquireScratch(); + // Compute the size of the actual parameters + receiver (in bytes). + __ Move(actual_params_size, + MemOperand(fp, StandardFrameConstants::kArgCOffset)); + + // If actual is bigger than formal, then we should use it to free up the stack + // arguments. + Label corrected_args_count; + __ masm()->Branch(&corrected_args_count, ge, params_size, + Operand(actual_params_size)); + __ masm()->Move(params_size, actual_params_size); + __ Bind(&corrected_args_count); + + // Leave the frame (also dropping the register file). 
+ __ masm()->LeaveFrame(StackFrame::BASELINE); + + // Drop receiver + arguments. + __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountIncludesReceiver); + + __ masm()->Ret(); +} + +#undef __ + +inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator( + Register reg) { + assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg, + Operand(kInterpreterAccumulatorRegister)); +} + +} // namespace baseline +} // namespace internal +} // namespace v8 + +#endif // V8_BASELINE_MIPS_BASELINE_ASSEMBLER_MIPS_INL_H_ diff --git a/deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h new file mode 100644 index 00000000000000..3e8bb98e14c196 --- /dev/null +++ b/deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h @@ -0,0 +1,78 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_ +#define V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_ + +#include "src/base/logging.h" +#include "src/baseline/baseline-compiler.h" + +namespace v8 { +namespace internal { +namespace baseline { + +#define __ basm_. + +void BaselineCompiler::Prologue() { + ASM_CODE_COMMENT(&masm_); + __ masm()->EnterFrame(StackFrame::BASELINE); + DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); + int max_frame_size = + bytecode_->frame_size() + max_call_args_ * kSystemPointerSize; + CallBuiltin( + kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, + max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); + + PrologueFillFrame(); +} + +void BaselineCompiler::PrologueFillFrame() { + ASM_CODE_COMMENT(&masm_); + // Inlined register frame fill + interpreter::Register new_target_or_generator_register = + bytecode_->incoming_new_target_or_generator_register(); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + int register_count = bytecode_->register_count(); + // Magic value + const int kLoopUnrollSize = 8; + const int new_target_index = new_target_or_generator_register.index(); + const bool has_new_target = new_target_index != kMaxInt; + if (has_new_target) { + DCHECK_LE(new_target_index, register_count); + __ masm()->Addu(sp, sp, Operand(-(kPointerSize * new_target_index))); + for (int i = 0; i < new_target_index; i++) { + __ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4)); + } + // Push new_target_or_generator. + __ Push(kJavaScriptCallNewTargetRegister); + register_count -= new_target_index + 1; + } + if (register_count < 2 * kLoopUnrollSize) { + // If the frame is small enough, just unroll the frame fill completely. 
+ __ masm()->Addu(sp, sp, Operand(-(kPointerSize * register_count))); + for (int i = 0; i < register_count; ++i) { + __ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4)); + } + } else { + __ masm()->Addu(sp, sp, Operand(-(kPointerSize * register_count))); + for (int i = 0; i < register_count; ++i) { + __ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4)); + } + } +} + +void BaselineCompiler::VerifyFrameSize() { + ASM_CODE_COMMENT(&masm_); + __ masm()->Addu(kScratchReg, sp, + Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp + + bytecode_->frame_size())); + __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg, + Operand(fp)); +} + +} // namespace baseline +} // namespace internal +} // namespace v8 + +#endif // V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_ diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h new file mode 100644 index 00000000000000..d7b9566a166b69 --- /dev/null +++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h @@ -0,0 +1,540 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_ +#define V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_ + +#include "src/baseline/baseline-assembler.h" +#include "src/codegen/assembler-inl.h" +#include "src/codegen/interface-descriptors.h" +namespace v8 { +namespace internal { +namespace baseline { + +class BaselineAssembler::ScratchRegisterScope { + public: + explicit ScratchRegisterScope(BaselineAssembler* assembler) + : assembler_(assembler), + prev_scope_(assembler->scratch_register_scope_), + wrapped_scope_(assembler->masm()) { + if (!assembler_->scratch_register_scope_) { + // If we haven't opened a scratch scope yet, for the first one add a + // couple of extra registers. 
+ wrapped_scope_.Include(kScratchReg, kScratchReg2); + } + assembler_->scratch_register_scope_ = this; + } + ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; } + + Register AcquireScratch() { return wrapped_scope_.Acquire(); } + + private: + BaselineAssembler* assembler_; + ScratchRegisterScope* prev_scope_; + UseScratchRegisterScope wrapped_scope_; +}; + +enum class Condition : uint32_t { + kEqual = eq, + kNotEqual = ne, + + kLessThan = lt, + kGreaterThan = gt, + kLessThanEqual = le, + kGreaterThanEqual = ge, + + kUnsignedLessThan = Uless, + kUnsignedGreaterThan = Ugreater, + kUnsignedLessThanEqual = Uless_equal, + kUnsignedGreaterThanEqual = Ugreater_equal, + + kOverflow = overflow, + kNoOverflow = no_overflow, + + kZero = eq, + kNotZero = ne, +}; + +inline internal::Condition AsMasmCondition(Condition cond) { + return static_cast(cond); +} + +namespace detail { + +#ifdef DEBUG +inline bool Clobbers(Register target, MemOperand op) { + return op.is_reg() && op.rm() == target; +} +#endif + +} // namespace detail + +#define __ masm_-> + +MemOperand BaselineAssembler::RegisterFrameOperand( + interpreter::Register interpreter_register) { + return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); +} +void BaselineAssembler::RegisterFrameAddress( + interpreter::Register interpreter_register, Register rscratch) { + return __ Add64(rscratch, fp, + interpreter_register.ToOperand() * kSystemPointerSize); +} +MemOperand BaselineAssembler::FeedbackVectorOperand() { + return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); +} + +void BaselineAssembler::Bind(Label* label) { __ bind(label); } + +void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); } + +void BaselineAssembler::JumpTarget() { + // Nop +} + +void BaselineAssembler::Jump(Label* target, Label::Distance distance) { + __ jmp(target); +} +void BaselineAssembler::JumpIfRoot(Register value, RootIndex index, + Label* target, Label::Distance) { + __ JumpIfRoot(value, index, target); +} +void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index, + Label* target, Label::Distance) { + __ JumpIfNotRoot(value, index, target); +} +void BaselineAssembler::JumpIfSmi(Register value, Label* target, + Label::Distance) { + __ JumpIfSmi(value, target); +} +void BaselineAssembler::JumpIfNotSmi(Register value, Label* target, + Label::Distance) { + __ JumpIfSmi(value, target); +} +void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right, + Label* target, + Label::Distance distance) { + JumpIf(cc, left, Operand(right), target, distance); +} +void BaselineAssembler::CallBuiltin(Builtin builtin) { + ASM_CODE_COMMENT_STRING(masm_, + __ CommentForOffHeapTrampoline("call", builtin)); + Register temp = t6; + __ LoadEntryFromBuiltin(builtin, temp); + __ Call(temp); +} + +void BaselineAssembler::TailCallBuiltin(Builtin builtin) { + ASM_CODE_COMMENT_STRING(masm_, + __ CommentForOffHeapTrampoline("tail call", builtin)); + Register temp = t6; + __ LoadEntryFromBuiltin(builtin, temp); + __ Jump(temp); +} + +void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc, + Label* target, Label::Distance) { + ScratchRegisterScope temps(this); + Register tmp = temps.AcquireScratch(); + __ And(tmp, value, Operand(mask)); + __ Branch(target, AsMasmCondition(cc), tmp, Operand(zero_reg)); +} + +void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs, + Label* target, Label::Distance) { + __ Branch(target, AsMasmCondition(cc), 
lhs, Operand(rhs)); +} +void BaselineAssembler::JumpIfObjectType(Condition cc, Register object, + InstanceType instance_type, + Register map, Label* target, + Label::Distance) { + ScratchRegisterScope temps(this); + Register type = temps.AcquireScratch(); + __ GetObjectType(object, map, type); + __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type)); +} +void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, + InstanceType instance_type, + Label* target, Label::Distance) { + ScratchRegisterScope temps(this); + Register type = temps.AcquireScratch(); + if (FLAG_debug_code) { + __ AssertNotSmi(map); + __ GetObjectType(map, type, type); + __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE)); + } + __ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset)); + __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type)); +} +void BaselineAssembler::JumpIfPointer(Condition cc, Register value, + MemOperand operand, Label* target, + Label::Distance) { + ScratchRegisterScope temps(this); + Register temp = temps.AcquireScratch(); + __ Ld(temp, operand); + __ Branch(target, AsMasmCondition(cc), value, Operand(temp)); +} +void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi, + Label* target, Label::Distance) { + ScratchRegisterScope temps(this); + Register temp = temps.AcquireScratch(); + __ li(temp, Operand(smi)); + __ SmiUntag(temp); + __ Branch(target, AsMasmCondition(cc), value, Operand(temp)); +} +void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs, + Label* target, Label::Distance) { + // todo: compress pointer + __ AssertSmi(lhs); + __ AssertSmi(rhs); + __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs)); +} +void BaselineAssembler::JumpIfTagged(Condition cc, Register value, + MemOperand operand, Label* target, + Label::Distance) { + // todo: compress pointer + ScratchRegisterScope temps(this); + Register scratch = temps.AcquireScratch(); + __ Ld(scratch, operand); + __ Branch(target, AsMasmCondition(cc), value, Operand(scratch)); +} +void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand, + Register value, Label* target, + Label::Distance) { + // todo: compress pointer + ScratchRegisterScope temps(this); + Register scratch = temps.AcquireScratch(); + __ Ld(scratch, operand); + __ Branch(target, AsMasmCondition(cc), scratch, Operand(value)); +} +void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte, + Label* target, Label::Distance) { + __ Branch(target, AsMasmCondition(cc), value, Operand(byte)); +} + +void BaselineAssembler::Move(interpreter::Register output, Register source) { + Move(RegisterFrameOperand(output), source); +} +void BaselineAssembler::Move(Register output, TaggedIndex value) { + __ li(output, Operand(value.ptr())); +} +void BaselineAssembler::Move(MemOperand output, Register source) { + __ Sd(source, output); +} +void BaselineAssembler::Move(Register output, ExternalReference reference) { + __ li(output, Operand(reference)); +} +void BaselineAssembler::Move(Register output, Handle value) { + __ li(output, Operand(value)); +} +void BaselineAssembler::Move(Register output, int32_t value) { + __ li(output, Operand(value)); +} +void BaselineAssembler::MoveMaybeSmi(Register output, Register source) { + __ Move(output, source); +} +void BaselineAssembler::MoveSmi(Register output, Register source) { + __ Move(output, source); +} + +namespace detail { + +template +inline Register ToRegister(BaselineAssembler* basm, + 
BaselineAssembler::ScratchRegisterScope* scope, + Arg arg) { + Register reg = scope->AcquireScratch(); + basm->Move(reg, arg); + return reg; +} +inline Register ToRegister(BaselineAssembler* basm, + BaselineAssembler::ScratchRegisterScope* scope, + Register reg) { + return reg; +} + +template +struct PushAllHelper; +template <> +struct PushAllHelper<> { + static int Push(BaselineAssembler* basm) { return 0; } + static int PushReverse(BaselineAssembler* basm) { return 0; } +}; +template +struct PushAllHelper { + static int Push(BaselineAssembler* basm, Arg arg) { + BaselineAssembler::ScratchRegisterScope scope(basm); + basm->masm()->Push(ToRegister(basm, &scope, arg)); + return 1; + } + static int PushReverse(BaselineAssembler* basm, Arg arg) { + return Push(basm, arg); + } +}; +template +struct PushAllHelper { + static int Push(BaselineAssembler* basm, Arg arg, Args... args) { + PushAllHelper::Push(basm, arg); + return 1 + PushAllHelper::Push(basm, args...); + } + static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) { + int nargs = PushAllHelper::PushReverse(basm, args...); + PushAllHelper::Push(basm, arg); + return nargs + 1; + } +}; +template <> +struct PushAllHelper { + static int Push(BaselineAssembler* basm, interpreter::RegisterList list) { + for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) { + PushAllHelper::Push(basm, list[reg_index]); + } + return list.register_count(); + } + static int PushReverse(BaselineAssembler* basm, + interpreter::RegisterList list) { + for (int reg_index = list.register_count() - 1; reg_index >= 0; + --reg_index) { + PushAllHelper::Push(basm, list[reg_index]); + } + return list.register_count(); + } +}; + +template +struct PopAllHelper; +template <> +struct PopAllHelper<> { + static void Pop(BaselineAssembler* basm) {} +}; +template <> +struct PopAllHelper { + static void Pop(BaselineAssembler* basm, Register reg) { + basm->masm()->Pop(reg); + } +}; +template +struct PopAllHelper { + static void Pop(BaselineAssembler* basm, Register reg, T... tail) { + PopAllHelper::Pop(basm, reg); + PopAllHelper::Pop(basm, tail...); + } +}; + +} // namespace detail + +template +int BaselineAssembler::Push(T... vals) { + return detail::PushAllHelper::Push(this, vals...); +} + +template +void BaselineAssembler::PushReverse(T... vals) { + detail::PushAllHelper::PushReverse(this, vals...); +} + +template +void BaselineAssembler::Pop(T... 
registers) { + detail::PopAllHelper::Pop(this, registers...); +} + +void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, + int offset) { + __ LoadTaggedPointerField(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, + int offset) { + __ LoadTaggedSignedField(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, + int offset) { + __ LoadAnyTaggedField(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, + Register source, int offset) { + __ Lhu(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::LoadWord8Field(Register output, Register source, + int offset) { + __ Lb(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::StoreTaggedSignedField(Register target, int offset, + Smi value) { + ASM_CODE_COMMENT(masm_); + ScratchRegisterScope temps(this); + Register tmp = temps.AcquireScratch(); + __ li(tmp, Operand(value)); + __ StoreTaggedField(tmp, FieldMemOperand(target, offset)); +} +void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target, + int offset, + Register value) { + ASM_CODE_COMMENT(masm_); + __ StoreTaggedField(value, FieldMemOperand(target, offset)); + __ RecordWriteField(target, offset, value, kRAHasNotBeenSaved, + SaveFPRegsMode::kIgnore); +} +void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target, + int offset, + Register value) { + __ StoreTaggedField(value, FieldMemOperand(target, offset)); +} + +void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( + int32_t weight, Label* skip_interrupt_label) { + ASM_CODE_COMMENT(masm_); + ScratchRegisterScope scratch_scope(this); + Register feedback_cell = scratch_scope.AcquireScratch(); + LoadFunction(feedback_cell); + LoadTaggedPointerField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); + + Register interrupt_budget = scratch_scope.AcquireScratch(); + __ Lw(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + // Remember to set flags as part of the add! + __ Add32(interrupt_budget, interrupt_budget, weight); + __ Sw(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + if (skip_interrupt_label) { + DCHECK_LT(weight, 0); + __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(weight)); + } +} + +void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( + Register weight, Label* skip_interrupt_label) { + ASM_CODE_COMMENT(masm_); + ScratchRegisterScope scratch_scope(this); + Register feedback_cell = scratch_scope.AcquireScratch(); + LoadFunction(feedback_cell); + LoadTaggedPointerField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); + + Register interrupt_budget = scratch_scope.AcquireScratch(); + __ Lw(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + // Remember to set flags as part of the add! 
+ __ Add32(interrupt_budget, interrupt_budget, weight); + __ Sw(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + if (skip_interrupt_label) + __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(weight)); +} + +void BaselineAssembler::AddSmi(Register lhs, Smi rhs) { + ASM_CODE_COMMENT(masm_); + if (SmiValuesAre31Bits()) { + __ Add32(lhs, lhs, Operand(rhs)); + } else { + __ Add64(lhs, lhs, Operand(rhs)); + } +} + +void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) { + __ And(output, lhs, Operand(rhs)); +} + +void BaselineAssembler::Switch(Register reg, int case_value_base, + Label** labels, int num_labels) { + ASM_CODE_COMMENT(masm_); + Label fallthrough; + if (case_value_base != 0) { + __ Sub64(reg, reg, Operand(case_value_base)); + } + + // Mostly copied from code-generator-riscv64.cc + ScratchRegisterScope scope(this); + Label table; + __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual), + reg, Operand(int64_t(num_labels))); + int64_t imm64; + imm64 = __ branch_long_offset(&table); + CHECK(is_int32(imm64 + 0x800)); + int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12); + int32_t Lo12 = (int32_t)imm64 << 20 >> 20; + __ BlockTrampolinePoolFor(2); + __ auipc(t6, Hi20); // Read PC + Hi20 into t6 + __ addi(t6, t6, Lo12); // jump PC + Hi20 + Lo12 + + int entry_size_log2 = 3; + __ CalcScaledAddress(t6, t6, reg, entry_size_log2); + __ Jump(t6); + { + TurboAssembler::BlockTrampolinePoolScope(masm()); + __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2); + __ bind(&table); + for (int i = 0; i < num_labels; ++i) { + __ BranchLong(labels[i]); + } + DCHECK_EQ(num_labels * 2, __ InstructionsGeneratedSince(&table)); + __ bind(&fallthrough); + } +} + +#undef __ + +#define __ basm. + +void BaselineAssembler::EmitReturn(MacroAssembler* masm) { + ASM_CODE_COMMENT(masm); + BaselineAssembler basm(masm); + + Register weight = BaselineLeaveFrameDescriptor::WeightRegister(); + Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister(); + + { + ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget"); + + Label skip_interrupt_label; + __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label); + __ masm()->SmiTag(params_size); + __ masm()->Push(params_size, kInterpreterAccumulatorRegister); + + __ LoadContext(kContextRegister); + __ LoadFunction(kJSFunctionRegister); + __ masm()->Push(kJSFunctionRegister); + __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1); + + __ masm()->Pop(params_size, kInterpreterAccumulatorRegister); + __ masm()->SmiUntag(params_size); + + __ Bind(&skip_interrupt_label); + } + + BaselineAssembler::ScratchRegisterScope temps(&basm); + Register actual_params_size = temps.AcquireScratch(); + // Compute the size of the actual parameters + receiver (in bytes). + __ Move(actual_params_size, + MemOperand(fp, StandardFrameConstants::kArgCOffset)); + + // If actual is bigger than formal, then we should use it to free up the stack + // arguments. + Label corrected_args_count; + __ masm()->Branch(&corrected_args_count, ge, params_size, + Operand(actual_params_size), Label::Distance::kNear); + __ masm()->Move(params_size, actual_params_size); + __ Bind(&corrected_args_count); + + // Leave the frame (also dropping the register file). + __ masm()->LeaveFrame(StackFrame::BASELINE); + + // Drop receiver + arguments. 
+ __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); + __ masm()->Ret(); +} + +#undef __ + +inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator( + Register reg) { + assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg, + Operand(kInterpreterAccumulatorRegister)); +} +} // namespace baseline +} // namespace internal +} // namespace v8 + +#endif // V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_ diff --git a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h new file mode 100644 index 00000000000000..1fbdaa0761e1ea --- /dev/null +++ b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h @@ -0,0 +1,79 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_ +#define V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_ + +#include "src/baseline/baseline-compiler.h" + +namespace v8 { +namespace internal { +namespace baseline { + +#define __ basm_. + +void BaselineCompiler::Prologue() { + ASM_CODE_COMMENT(&masm_); + // Enter the frame here, since CallBuiltin will override lr. + __ masm()->EnterFrame(StackFrame::BASELINE); + DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); + int max_frame_size = + bytecode_->frame_size() + max_call_args_ * kSystemPointerSize; + CallBuiltin( + kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, + max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); + PrologueFillFrame(); +} + +void BaselineCompiler::PrologueFillFrame() { + ASM_CODE_COMMENT(&masm_); + // Inlined register frame fill + interpreter::Register new_target_or_generator_register = + bytecode_->incoming_new_target_or_generator_register(); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + int register_count = bytecode_->register_count(); + // Magic value + const int kLoopUnrollSize = 8; + const int new_target_index = new_target_or_generator_register.index(); + const bool has_new_target = new_target_index != kMaxInt; + if (has_new_target) { + DCHECK_LE(new_target_index, register_count); + __ masm()->Add64(sp, sp, Operand(-(kPointerSize * new_target_index))); + for (int i = 0; i < new_target_index; i++) { + __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8)); + } + // Push new_target_or_generator. + __ Push(kJavaScriptCallNewTargetRegister); + register_count -= new_target_index + 1; + } + if (register_count < 2 * kLoopUnrollSize) { + // If the frame is small enough, just unroll the frame fill completely. 
+ __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count))); + for (int i = 0; i < register_count; ++i) { + __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8)); + } + } else { + __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count))); + for (int i = 0; i < register_count; ++i) { + __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8)); + } + } +} + +void BaselineCompiler::VerifyFrameSize() { + ASM_CODE_COMMENT(&masm_); + __ masm()->Add64(kScratchReg, sp, + Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp + + bytecode_->frame_size())); + __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg, + Operand(fp)); +} + +#undef __ + +} // namespace baseline +} // namespace internal +} // namespace v8 + +#endif // V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_ diff --git a/deps/v8/src/builtins/builtins-shadow-realms.cc b/deps/v8/src/builtins/builtins-shadow-realms.cc new file mode 100644 index 00000000000000..08b3f3ec31c818 --- /dev/null +++ b/deps/v8/src/builtins/builtins-shadow-realms.cc @@ -0,0 +1,226 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/builtins/builtins-utils-inl.h" +#include "src/codegen/compiler.h" +#include "src/logging/counters.h" +#include "src/objects/js-shadow-realms-inl.h" + +namespace v8 { +namespace internal { + +// https://tc39.es/proposal-shadowrealm/#sec-shadowrealm-constructor +BUILTIN(ShadowRealmConstructor) { + HandleScope scope(isolate); + // 1. If NewTarget is undefined, throw a TypeError exception. + if (args.new_target()->IsUndefined(isolate)) { // [[Call]] + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kConstructorNotFunction, + isolate->factory()->ShadowRealm_string())); + } + // [[Construct]] + Handle target = args.target(); + Handle new_target = Handle::cast(args.new_target()); + + // 3. Let realmRec be CreateRealm(). + // 5. Let context be a new execution context. + // 6. Set the Function of context to null. + // 7. Set the Realm of context to realmRec. + // 8. Set the ScriptOrModule of context to null. + // 10. Perform ? SetRealmGlobalObject(realmRec, undefined, undefined). + // 11. Perform ? SetDefaultGlobalBindings(O.[[ShadowRealm]]). + // 12. Perform ? HostInitializeShadowRealm(O.[[ShadowRealm]]). + // These steps are combined in + // Isolate::RunHostCreateShadowRealmContextCallback and Context::New. + // The host operation is hoisted for not creating a half-initialized + // ShadowRealm object, which can fail the heap verification. + Handle native_context; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, native_context, + isolate->RunHostCreateShadowRealmContextCallback()); + + // 2. Let O be ? OrdinaryCreateFromConstructor(NewTarget, + // "%ShadowRealm.prototype%", « [[ShadowRealm]], [[ExecutionContext]] »). + Handle result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::New(target, new_target, Handle::null())); + Handle O = Handle::cast(result); + + // 4. Set O.[[ShadowRealm]] to realmRec. + // 9. Set O.[[ExecutionContext]] to context. + O->set_native_context(*native_context); + + // 13. Return O. + return *O; +} + +namespace { + +// https://tc39.es/proposal-shadowrealm/#sec-getwrappedvalue +MaybeHandle GetWrappedValue(Isolate* isolate, + Handle creation_context, + Handle value) { + // 1. If Type(value) is Object, then + if (!value->IsJSReceiver()) { + // 2. Return value. 
+ return value; + } + // 1a. If IsCallable(value) is false, throw a TypeError exception. + if (!value->IsCallable()) { + // The TypeError thrown is created with creation Realm's TypeError + // constructor instead of the executing Realm's. + THROW_NEW_ERROR_RETURN_VALUE( + isolate, + NewError(Handle(creation_context->type_error_function(), + isolate), + MessageTemplate::kNotCallable), + {}); + } + // 1b. Return ? WrappedFunctionCreate(callerRealm, value). + return JSWrappedFunction::Create(isolate, creation_context, + Handle::cast(value)); +} + +} // namespace + +// https://tc39.es/proposal-shadowrealm/#sec-shadowrealm.prototype.evaluate +BUILTIN(ShadowRealmPrototypeEvaluate) { + HandleScope scope(isolate); + + Handle source_text = args.atOrUndefined(isolate, 1); + // 1. Let O be this value. + Handle receiver = args.receiver(); + + Factory* factory = isolate->factory(); + + // 2. Perform ? ValidateShadowRealmObject(O). + if (!receiver->IsJSShadowRealm()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver)); + } + Handle shadow_realm = Handle::cast(receiver); + + // 3. If Type(sourceText) is not String, throw a TypeError exception. + if (!source_text->IsString()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewTypeError(MessageTemplate::kInvalidShadowRealmEvaluateSourceText)); + } + + // 4. Let callerRealm be the current Realm Record. + Handle caller_context = isolate->native_context(); + + // 5. Let evalRealm be O.[[ShadowRealm]]. + Handle eval_context = + Handle(shadow_realm->native_context(), isolate); + // 6. Return ? PerformShadowRealmEval(sourceText, callerRealm, evalRealm). + + // PerformShadowRealmEval + // https://tc39.es/proposal-shadowrealm/#sec-performshadowrealmeval + // 1. Perform ? HostEnsureCanCompileStrings(callerRealm, evalRealm). + // Run embedder pre-checks before executing the source code. + MaybeHandle validated_source; + bool unhandled_object; + std::tie(validated_source, unhandled_object) = + Compiler::ValidateDynamicCompilationSource(isolate, eval_context, + source_text); + if (unhandled_object) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewTypeError(MessageTemplate::kInvalidShadowRealmEvaluateSourceText)); + } + + Handle eval_global_proxy(eval_context->global_proxy(), isolate); + MaybeHandle result; + bool is_parse_failed = false; + { + // 8. If runningContext is not already suspended, suspend runningContext. + // 9. Let evalContext be a new ECMAScript code execution context. + // 10. Set evalContext's Function to null. + // 11. Set evalContext's Realm to evalRealm. + // 12. Set evalContext's ScriptOrModule to null. + // 13. Set evalContext's VariableEnvironment to varEnv. + // 14. Set evalContext's LexicalEnvironment to lexEnv. + // 15. Push evalContext onto the execution context stack; evalContext is now + // the running execution context. + SaveAndSwitchContext save(isolate, *eval_context); + + // 2. Perform the following substeps in an implementation-defined order, + // possibly interleaving parsing and error detection: + // 2a. Let script be ParseText(! StringToCodePoints(sourceText), Script). + // 2b. If script is a List of errors, throw a SyntaxError exception. + // 2c. If script Contains ScriptBody is false, return undefined. + // 2d. Let body be the ScriptBody of script. + // 2e. If body Contains NewTarget is true, throw a SyntaxError + // exception. + // 2f. If body Contains SuperProperty is true, throw a SyntaxError + // exception. + // 2g. 
If body Contains SuperCall is true, throw a SyntaxError exception. + // 3. Let strictEval be IsStrict of script. + // 4. Let runningContext be the running execution context. + // 5. Let lexEnv be NewDeclarativeEnvironment(evalRealm.[[GlobalEnv]]). + // 6. Let varEnv be evalRealm.[[GlobalEnv]]. + // 7. If strictEval is true, set varEnv to lexEnv. + Handle function; + MaybeHandle maybe_function = + Compiler::GetFunctionFromValidatedString(eval_context, validated_source, + NO_PARSE_RESTRICTION, + kNoSourcePosition); + if (maybe_function.is_null()) { + is_parse_failed = true; + } else { + function = maybe_function.ToHandleChecked(); + + // 16. Let result be EvalDeclarationInstantiation(body, varEnv, + // lexEnv, null, strictEval). + // 17. If result.[[Type]] is normal, then + // 20a. Set result to the result of evaluating body. + // 18. If result.[[Type]] is normal and result.[[Value]] is empty, then + // 21a. Set result to NormalCompletion(undefined). + result = + Execution::Call(isolate, function, eval_global_proxy, 0, nullptr); + + // 19. Suspend evalContext and remove it from the execution context stack. + // 20. Resume the context that is now on the top of the execution context + // stack as the running execution context. Done by the scope. + } + } + + if (result.is_null()) { + DCHECK(isolate->has_pending_exception()); + Handle pending_exception = + Handle(isolate->pending_exception(), isolate); + isolate->clear_pending_exception(); + if (is_parse_failed) { + Handle error_object = Handle::cast(pending_exception); + Handle message = Handle::cast(JSReceiver::GetDataProperty( + isolate, error_object, factory->message_string())); + + return isolate->ReThrow( + *factory->NewError(isolate->syntax_error_function(), message)); + } + // 21. If result.[[Type]] is not normal, throw a TypeError exception. + // TODO(v8:11989): provide a non-observable inspection on the + // pending_exception to the newly created TypeError. + // https://github.com/tc39/proposal-shadowrealm/issues/353 + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kCallShadowRealmFunctionThrown)); + } + // 22. Return ? GetWrappedValue(callerRealm, result.[[Value]]). + Handle wrapped_result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, wrapped_result, + GetWrappedValue(isolate, caller_context, result.ToHandleChecked())); + return *wrapped_result; +} + +// https://tc39.es/proposal-shadowrealm/#sec-shadowrealm.prototype.importvalue +BUILTIN(ShadowRealmPrototypeImportValue) { + HandleScope scope(isolate); + return ReadOnlyRoots(isolate).undefined_value(); +} + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/builtins/builtins-shadowrealm-gen.cc b/deps/v8/src/builtins/builtins-shadowrealm-gen.cc new file mode 100644 index 00000000000000..f65f611683c421 --- /dev/null +++ b/deps/v8/src/builtins/builtins-shadowrealm-gen.cc @@ -0,0 +1,248 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
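
Note on the wrapping logic: the runtime path above and the CodeStubAssembler fast path in the file below implement the same GetWrappedValue decision — primitives cross the realm boundary unchanged, callables are wrapped into a JSWrappedFunction bound to the caller realm, and any other object triggers a TypeError created with the creation realm's TypeError constructor rather than the executing realm's. The following is a minimal standalone C++ sketch of that decision logic only; Value, Callable, Realm, and WrappedFunctionCreate here are illustrative stand-ins, not V8 types or API.

    #include <functional>
    #include <stdexcept>
    #include <variant>

    // Illustrative stand-ins for realm values; these are not V8 types.
    struct Primitive { double number = 0; };
    struct PlainObject {};  // an object that is not callable
    using Callable = std::function<Primitive(Primitive)>;
    using Value = std::variant<Primitive, Callable, PlainObject>;

    struct Realm { const char* name; };

    // Wrap a callable for the caller realm; calling the wrapper forwards to the
    // target. (The real JSWrappedFunction also wraps arguments and results
    // recursively and copies name/length; that is omitted here, and caller_realm
    // would only be needed to pick which realm's error constructors to use.)
    Callable WrappedFunctionCreate(const Realm& caller_realm, Callable target) {
      return [target](Primitive arg) { return target(arg); };
    }

    Value GetWrappedValue(const Realm& caller_realm, const Value& value) {
      // 2. Primitives pass through unchanged.
      if (std::holds_alternative<Primitive>(value)) return value;
      // 1b. Callables get a wrapper bound to the caller realm.
      if (const Callable* fn = std::get_if<Callable>(&value)) {
        return WrappedFunctionCreate(caller_realm, *fn);
      }
      // 1a. Anything else cannot cross the boundary: TypeError.
      throw std::runtime_error("TypeError: value is not callable");
    }
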
+ +#include "src/builtins/builtins-utils-gen.h" +#include "src/builtins/builtins.h" +#include "src/codegen/code-stub-assembler.h" +#include "src/objects/descriptor-array.h" + +namespace v8 { +namespace internal { + +class ShadowRealmBuiltinsAssembler : public CodeStubAssembler { + public: + explicit ShadowRealmBuiltinsAssembler(compiler::CodeAssemblerState* state) + : CodeStubAssembler(state) {} + + protected: + TNode AllocateJSWrappedFunction(TNode context, + TNode target); + void CheckAccessor(TNode array, TNode index, + TNode name, Label* bailout); +}; + +TNode ShadowRealmBuiltinsAssembler::AllocateJSWrappedFunction( + TNode context, TNode target) { + TNode native_context = LoadNativeContext(context); + TNode map = CAST( + LoadContextElement(native_context, Context::WRAPPED_FUNCTION_MAP_INDEX)); + TNode wrapped = AllocateJSObjectFromMap(map); + StoreObjectFieldNoWriteBarrier( + wrapped, JSWrappedFunction::kWrappedTargetFunctionOffset, target); + StoreObjectFieldNoWriteBarrier(wrapped, JSWrappedFunction::kContextOffset, + context); + return wrapped; +} + +void ShadowRealmBuiltinsAssembler::CheckAccessor(TNode array, + TNode index, + TNode name, + Label* bailout) { + TNode key = LoadKeyByDescriptorEntry(array, index); + GotoIfNot(TaggedEqual(key, name), bailout); + TNode value = LoadValueByDescriptorEntry(array, index); + GotoIfNot(IsAccessorInfo(CAST(value)), bailout); +} + +// https://tc39.es/proposal-shadowrealm/#sec-getwrappedvalue +TF_BUILTIN(ShadowRealmGetWrappedValue, ShadowRealmBuiltinsAssembler) { + auto context = Parameter(Descriptor::kContext); + auto creation_context = Parameter(Descriptor::kCreationContext); + auto target_context = Parameter(Descriptor::kTargetContext); + auto value = Parameter(Descriptor::kValue); + + Label if_primitive(this), if_callable(this), unwrap(this), wrap(this), + slow_wrap(this, Label::kDeferred), bailout(this, Label::kDeferred); + + // 2. Return value. + GotoIf(TaggedIsSmi(value), &if_primitive); + GotoIfNot(IsJSReceiver(CAST(value)), &if_primitive); + + // 1. If Type(value) is Object, then + // 1a. If IsCallable(value) is false, throw a TypeError exception. + // 1b. Return ? WrappedFunctionCreate(callerRealm, value). + Branch(IsCallable(CAST(value)), &if_callable, &bailout); + + BIND(&if_primitive); + Return(value); + + BIND(&if_callable); + TVARIABLE(Object, target); + target = value; + // WrappedFunctionCreate + // https://tc39.es/proposal-shadowrealm/#sec-wrappedfunctioncreate + Branch(IsJSWrappedFunction(CAST(value)), &unwrap, &wrap); + + BIND(&unwrap); + // The intermediate wrapped functions are not user-visible. And calling a + // wrapped function won't cause a side effect in the creation realm. + // Unwrap here to avoid nested unwrapping at the call site. + TNode target_wrapped_function = CAST(value); + target = LoadObjectField(target_wrapped_function, + JSWrappedFunction::kWrappedTargetFunctionOffset); + Goto(&wrap); + + BIND(&wrap); + // Disallow wrapping of slow-mode functions. We need to figure out + // whether the length and name property are in the original state. + TNode map = LoadMap(CAST(target.value())); + GotoIf(IsDictionaryMap(map), &slow_wrap); + + // Check whether the length and name properties are still present as + // AccessorInfo objects. If so, their value can be recomputed even if + // the actual value on the object changes. 
+ TNode bit_field3 = LoadMapBitField3(map); + TNode number_of_own_descriptors = Signed( + DecodeWordFromWord32(bit_field3)); + GotoIf(IntPtrLessThan( + number_of_own_descriptors, + IntPtrConstant(JSFunction::kMinDescriptorsForFastBindAndWrap)), + &slow_wrap); + + // We don't need to check the exact accessor here because the only case + // custom accessor arise is with function templates via API, and in that + // case the object is in dictionary mode + TNode descriptors = LoadMapInstanceDescriptors(map); + CheckAccessor( + descriptors, + IntPtrConstant( + JSFunctionOrBoundFunctionOrWrappedFunction::kLengthDescriptorIndex), + LengthStringConstant(), &slow_wrap); + CheckAccessor( + descriptors, + IntPtrConstant( + JSFunctionOrBoundFunctionOrWrappedFunction::kNameDescriptorIndex), + NameStringConstant(), &slow_wrap); + + // Verify that prototype matches the function prototype of the target + // context. + TNode prototype = LoadMapPrototype(map); + TNode function_map = + LoadContextElement(target_context, Context::WRAPPED_FUNCTION_MAP_INDEX); + TNode function_prototype = LoadMapPrototype(CAST(function_map)); + GotoIf(TaggedNotEqual(prototype, function_prototype), &slow_wrap); + + // 1. Let internalSlotsList be the internal slots listed in Table 2, plus + // [[Prototype]] and [[Extensible]]. + // 2. Let wrapped be ! MakeBasicObject(internalSlotsList). + // 3. Set wrapped.[[Prototype]] to + // callerRealm.[[Intrinsics]].[[%Function.prototype%]]. + // 4. Set wrapped.[[Call]] as described in 2.1. + // 5. Set wrapped.[[WrappedTargetFunction]] to Target. + // 6. Set wrapped.[[Realm]] to callerRealm. + // 7. Let result be CopyNameAndLength(wrapped, Target, "wrapped"). + // 8. If result is an Abrupt Completion, throw a TypeError exception. + // Installed with default accessors. + TNode wrapped = + AllocateJSWrappedFunction(creation_context, target.value()); + + // 9. Return wrapped. + Return(wrapped); + + BIND(&slow_wrap); + { + Return(CallRuntime(Runtime::kShadowRealmWrappedFunctionCreate, context, + creation_context, target.value())); + } + + BIND(&bailout); + ThrowTypeError(context, MessageTemplate::kNotCallable, value); +} + +// https://tc39.es/proposal-shadowrealm/#sec-wrapped-function-exotic-objects-call-thisargument-argumentslist +TF_BUILTIN(CallWrappedFunction, ShadowRealmBuiltinsAssembler) { + auto argc = UncheckedParameter(Descriptor::kActualArgumentsCount); + TNode argc_ptr = ChangeInt32ToIntPtr(argc); + auto wrapped_function = Parameter(Descriptor::kFunction); + auto context = Parameter(Descriptor::kContext); + + PerformStackCheck(context); + + Label call_exception(this, Label::kDeferred), + target_not_callable(this, Label::kDeferred); + + // 1. Let target be F.[[WrappedTargetFunction]]. + TNode target = CAST(LoadObjectField( + wrapped_function, JSWrappedFunction::kWrappedTargetFunctionOffset)); + // 2. Assert: IsCallable(target) is true. + CSA_DCHECK(this, IsCallable(target)); + + // 4. Let callerRealm be ? GetFunctionRealm(F). + TNode caller_context = LoadObjectField( + wrapped_function, JSWrappedFunction::kContextOffset); + // 3. Let targetRealm be ? GetFunctionRealm(target). + TNode target_context = + GetFunctionRealm(caller_context, target, &target_not_callable); + // 5. NOTE: Any exception objects produced after this point are associated + // with callerRealm. + + CodeStubArguments args(this, argc_ptr); + TNode receiver = args.GetReceiver(); + + // 6. Let wrappedArgs be a new empty List. 
+ TNode wrapped_args = + CAST(AllocateFixedArray(ElementsKind::PACKED_ELEMENTS, argc_ptr)); + // Fill the fixed array so that heap verifier doesn't complain about it. + FillFixedArrayWithValue(ElementsKind::PACKED_ELEMENTS, wrapped_args, + IntPtrConstant(0), argc_ptr, + RootIndex::kUndefinedValue); + + // 8. Let wrappedThisArgument to ? GetWrappedValue(targetRealm, thisArgument). + // Create wrapped value in the target realm. + TNode wrapped_receiver = + CallBuiltin(Builtin::kShadowRealmGetWrappedValue, caller_context, + target_context, caller_context, receiver); + StoreFixedArrayElement(wrapped_args, 0, wrapped_receiver); + // 7. For each element arg of argumentsList, do + BuildFastLoop( + IntPtrConstant(0), args.GetLengthWithoutReceiver(), + [&](TNode index) { + // 7a. Let wrappedValue be ? GetWrappedValue(targetRealm, arg). + // Create wrapped value in the target realm. + TNode wrapped_value = + CallBuiltin(Builtin::kShadowRealmGetWrappedValue, caller_context, + target_context, caller_context, args.AtIndex(index)); + // 7b. Append wrappedValue to wrappedArgs. + StoreFixedArrayElement( + wrapped_args, IntPtrAdd(index, IntPtrConstant(1)), wrapped_value); + }, + 1, IndexAdvanceMode::kPost); + + TVARIABLE(Object, var_exception); + TNode result; + { + compiler::ScopedExceptionHandler handler(this, &call_exception, + &var_exception); + TNode args_count = Int32Constant(0); // args already on the stack + Callable callable = CodeFactory::CallVarargs(isolate()); + + // 9. Let result be the Completion Record of Call(target, + // wrappedThisArgument, wrappedArgs). + result = CallStub(callable, target_context, target, args_count, argc, + wrapped_args); + } + + // 10. If result.[[Type]] is normal or result.[[Type]] is return, then + // 10a. Return ? GetWrappedValue(callerRealm, result.[[Value]]). + TNode wrapped_result = + CallBuiltin(Builtin::kShadowRealmGetWrappedValue, caller_context, + caller_context, target_context, result); + args.PopAndReturn(wrapped_result); + + // 11. Else, + BIND(&call_exception); + // 11a. Throw a TypeError exception. + // TODO(v8:11989): provide a non-observable inspection on the + // pending_exception to the newly created TypeError. + // https://github.com/tc39/proposal-shadowrealm/issues/353 + ThrowTypeError(context, MessageTemplate::kCallShadowRealmFunctionThrown, + var_exception.value()); + + BIND(&target_not_callable); + // A wrapped value should not be non-callable. + Unreachable(); +} + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc new file mode 100644 index 00000000000000..c11ced1b728a13 --- /dev/null +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -0,0 +1,4213 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#if V8_TARGET_ARCH_MIPS + +#include "src/api/api-arguments.h" +#include "src/codegen/code-factory.h" +#include "src/codegen/interface-descriptors-inl.h" +#include "src/debug/debug.h" +#include "src/deoptimizer/deoptimizer.h" +#include "src/execution/frame-constants.h" +#include "src/execution/frames.h" +#include "src/logging/counters.h" +// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. 
+#include "src/codegen/macro-assembler-inl.h" +#include "src/codegen/mips/constants-mips.h" +#include "src/codegen/register-configuration.h" +#include "src/heap/heap-inl.h" +#include "src/objects/cell.h" +#include "src/objects/foreign.h" +#include "src/objects/heap-number.h" +#include "src/objects/js-generator.h" +#include "src/objects/objects-inl.h" +#include "src/objects/smi.h" +#include "src/runtime/runtime.h" + +#if V8_ENABLE_WEBASSEMBLY +#include "src/wasm/wasm-linkage.h" +#include "src/wasm/wasm-objects.h" +#endif // V8_ENABLE_WEBASSEMBLY + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm) + +void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) { + __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address)); + __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), + RelocInfo::CODE_TARGET); +} + +static void GenerateTailCallToReturnedCode(MacroAssembler* masm, + Runtime::FunctionId function_id) { + // ----------- S t a t e ------------- + // -- a0 : actual argument count + // -- a1 : target function (preserved for callee) + // -- a3 : new target (preserved for callee) + // ----------------------------------- + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Push a copy of the target function, the new target and the actual + // argument count. + // Push function as parameter to the runtime call. + __ SmiTag(kJavaScriptCallArgCountRegister); + __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister, + kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister); + + __ CallRuntime(function_id, 1); + + // Restore target function, new target and actual argument count. + __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister, + kJavaScriptCallArgCountRegister); + __ SmiUntag(kJavaScriptCallArgCountRegister); + } + + static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); + __ Addu(a2, v0, Code::kHeaderSize - kHeapObjectTag); + __ Jump(a2); +} + +namespace { + +enum class ArgumentsElementType { + kRaw, // Push arguments as they are. + kHandle // Dereference arguments before pushing. +}; + +void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, + Register scratch, Register scratch2, + ArgumentsElementType element_type) { + DCHECK(!AreAliased(array, argc, scratch)); + Label loop, entry; + __ Subu(scratch, argc, Operand(kJSArgcReceiverSlots)); + __ Branch(&entry); + __ bind(&loop); + __ Lsa(scratch2, array, scratch, kSystemPointerSizeLog2); + __ lw(scratch2, MemOperand(scratch2)); + if (element_type == ArgumentsElementType::kHandle) { + __ lw(scratch2, MemOperand(scratch2)); + } + __ push(scratch2); + __ bind(&entry); + __ Addu(scratch, scratch, Operand(-1)); + __ Branch(&loop, greater_equal, scratch, Operand(zero_reg)); +} + +void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : number of arguments + // -- a1 : constructor function + // -- a3 : new target + // -- cp : context + // -- ra : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + + // Enter a construct frame. + { + FrameScope scope(masm, StackFrame::CONSTRUCT); + + // Preserve the incoming parameters on the stack. + __ SmiTag(a0); + __ Push(cp, a0); + __ SmiUntag(a0); + // Set up pointer to first argument (skip receiver). + __ Addu( + t2, fp, + Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); + // Copy arguments and receiver to the expression stack. 
+ // t2: Pointer to start of arguments. + // a0: Number of arguments. + Generate_PushArguments(masm, t2, a0, t3, t0, ArgumentsElementType::kRaw); + // The receiver for the builtin/api call. + __ PushRoot(RootIndex::kTheHoleValue); + + // Call the function. + // a0: number of arguments (untagged) + // a1: constructor function + // a3: new target + __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall); + + // Restore context from the frame. + __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); + // Restore smi-tagged arguments count from the frame. + __ lw(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); + // Leave construct frame. + } + + // Remove caller arguments from the stack and return. + __ DropArguments(t3, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountIncludesReceiver); + __ Ret(); +} + +} // namespace + +// The construct stub for ES5 constructor functions and ES6 class constructors. +void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0: number of arguments (untagged) + // -- a1: constructor function + // -- a3: new target + // -- cp: context + // -- ra: return address + // -- sp[...]: constructor arguments + // ----------------------------------- + + // Enter a construct frame. + FrameScope scope(masm, StackFrame::MANUAL); + Label post_instantiation_deopt_entry, not_create_implicit_receiver; + __ EnterFrame(StackFrame::CONSTRUCT); + + // Preserve the incoming parameters on the stack. + __ SmiTag(a0); + __ Push(cp, a0, a1); + __ PushRoot(RootIndex::kTheHoleValue); + __ Push(a3); + + // ----------- S t a t e ------------- + // -- sp[0*kPointerSize]: new target + // -- sp[1*kPointerSize]: padding + // -- a1 and sp[2*kPointerSize]: constructor function + // -- sp[3*kPointerSize]: number of arguments (tagged) + // -- sp[4*kPointerSize]: context + // ----------------------------------- + + __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset)); + __ DecodeField(t2); + __ JumpIfIsInRange( + t2, static_cast(FunctionKind::kDefaultDerivedConstructor), + static_cast(FunctionKind::kDerivedConstructor), + ¬_create_implicit_receiver); + + // If not derived class constructor: Allocate the new receiver object. + __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, + t2, t3); + __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET); + __ Branch(&post_instantiation_deopt_entry); + + // Else: use TheHoleValue as receiver for constructor call + __ bind(¬_create_implicit_receiver); + __ LoadRoot(v0, RootIndex::kTheHoleValue); + + // ----------- S t a t e ------------- + // -- v0: receiver + // -- Slot 4 / sp[0*kPointerSize]: new target + // -- Slot 3 / sp[1*kPointerSize]: padding + // -- Slot 2 / sp[2*kPointerSize]: constructor function + // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged) + // -- Slot 0 / sp[4*kPointerSize]: context + // ----------------------------------- + // Deoptimizer enters here. + masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset( + masm->pc_offset()); + __ bind(&post_instantiation_deopt_entry); + + // Restore new target. + __ Pop(a3); + + // Push the allocated receiver to the stack. + __ Push(v0); + + // We need two copies because we may have to return the original one + // and the calling conventions dictate that the called function pops the + // receiver. 
The second copy is pushed after the arguments, we saved in s0 + // since v0 will store the return value of callRuntime. + __ mov(s0, v0); + + // Set up pointer to last argument. + __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset + + kSystemPointerSize)); + + // ----------- S t a t e ------------- + // -- r3: new target + // -- sp[0*kPointerSize]: implicit receiver + // -- sp[1*kPointerSize]: implicit receiver + // -- sp[2*kPointerSize]: padding + // -- sp[3*kPointerSize]: constructor function + // -- sp[4*kPointerSize]: number of arguments (tagged) + // -- sp[5*kPointerSize]: context + // ----------------------------------- + + // Restore constructor function and argument count. + __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset)); + __ lw(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); + __ SmiUntag(a0); + + Label stack_overflow; + __ StackOverflowCheck(a0, t0, t1, &stack_overflow); + + // TODO(victorgomes): When the arguments adaptor is completely removed, we + // should get the formal parameter count and copy the arguments in its + // correct position (including any undefined), instead of delaying this to + // InvokeFunction. + + // Copy arguments and receiver to the expression stack. + // t2: Pointer to start of argument. + // a0: Number of arguments. + Generate_PushArguments(masm, t2, a0, t0, t1, ArgumentsElementType::kRaw); + + // We need two copies because we may have to return the original one + // and the calling conventions dictate that the called function pops the + // receiver. The second copy is pushed after the arguments. + __ Push(s0); + + // Call the function. + __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall); + + // ----------- S t a t e ------------- + // -- v0: constructor result + // -- sp[0*kPointerSize]: implicit receiver + // -- sp[1*kPointerSize]: padding + // -- sp[2*kPointerSize]: constructor function + // -- sp[3*kPointerSize]: number of arguments + // -- sp[4*kPointerSize]: context + // ----------------------------------- + + // Store offset of return address for deoptimizer. + masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset( + masm->pc_offset()); + + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, do_throw, leave_and_return, check_receiver; + + // If the result is undefined, we jump out to using the implicit receiver. + __ JumpIfNotRoot(v0, RootIndex::kUndefinedValue, &check_receiver); + + // Otherwise we do a smi check and fall through to check if the return value + // is a valid receiver. + + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ lw(v0, MemOperand(sp, 0 * kPointerSize)); + __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw); + + __ bind(&leave_and_return); + // Restore smi-tagged arguments count from the frame. + __ lw(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); + // Leave construct frame. + __ LeaveFrame(StackFrame::CONSTRUCT); + + // Remove caller arguments from the stack and return. + __ DropArguments(a1, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountIncludesReceiver); + __ Ret(); + + __ bind(&check_receiver); + // If the result is a smi, it is *not* an object in the ECMA sense. 
+ __ JumpIfSmi(v0, &use_receiver); + + // If the type of the result (stored in its map) is less than + // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense. + __ GetObjectType(v0, t2, t2); + STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); + __ Branch(&leave_and_return, greater_equal, t2, + Operand(FIRST_JS_RECEIVER_TYPE)); + __ Branch(&use_receiver); + + __ bind(&do_throw); + // Restore the context from the frame. + __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); + __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject); + __ break_(0xCC); + + __ bind(&stack_overflow); + // Restore the context from the frame. + __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); + __ CallRuntime(Runtime::kThrowStackOverflow); + // Unreachable code. + __ break_(0xCC); +} + +void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { + Generate_JSBuiltinsConstructStubHelper(masm); +} + +void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a1); + __ CallRuntime(Runtime::kThrowConstructedNonConstructable); +} + +// Clobbers scratch1 and scratch2; preserves all other registers. +static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc, + Register scratch1, Register scratch2) { + ASM_CODE_COMMENT(masm); + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit); + // Make a2 the space we have left. The stack might already be overflowed + // here which will cause a2 to become negative. + __ Subu(scratch1, sp, scratch1); + // Check if the arguments will overflow the stack. + __ sll(scratch2, argc, kPointerSizeLog2); + // Signed comparison. + __ Branch(&okay, gt, scratch1, Operand(scratch2)); + + // Out of stack space. + __ CallRuntime(Runtime::kThrowStackOverflow); + + __ bind(&okay); +} + +namespace { + +// Used by JSEntryTrampoline to refer C++ parameter to JSEntryVariant. +constexpr int kPushedStackSpace = + kCArgsSlotsSize + (kNumCalleeSaved + 1) * kPointerSize + + kNumCalleeSavedFPU * kDoubleSize + 4 * kPointerSize + + EntryFrameConstants::kCallerFPOffset; + +// Called with the native C calling convention. The corresponding function +// signature is either: +// +// using JSEntryFunction = GeneratedCode; +// or +// using JSEntryFunction = GeneratedCode; +// +// Passes through a0, a1, a2, a3 and stack to JSEntryTrampoline. +void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, + Builtin entry_trampoline) { + Label invoke, handler_entry, exit; + + int pushed_stack_space = kCArgsSlotsSize; + { + NoRootArrayScope no_root_array(masm); + + // Registers: + // a0: root_register_value + + // Save callee saved registers on the stack. + __ MultiPush(kCalleeSaved | ra); + pushed_stack_space += + kNumCalleeSaved * kPointerSize + kPointerSize /* ra */; + + // Save callee-saved FPU registers. + __ MultiPushFPU(kCalleeSavedFPU); + pushed_stack_space += kNumCalleeSavedFPU * kDoubleSize; + + // Set up the reserved register for 0.0. + __ Move(kDoubleRegZero, 0.0); + + // Initialize the root register. + // C calling convention. The first argument is passed in a0. + __ mov(kRootRegister, a0); + } + + // We build an EntryFrame. + __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. 
+ __ li(t2, Operand(StackFrame::TypeToMarker(type))); + __ li(t1, Operand(StackFrame::TypeToMarker(type))); + __ li(t4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, + masm->isolate())); + __ lw(t0, MemOperand(t4)); + __ Push(t3, t2, t1, t0); + pushed_stack_space += 4 * kPointerSize; + + // Clear c_entry_fp, now we've pushed its previous value to the stack. + // If the c_entry_fp is not already zero and we don't clear it, the + // SafeStackFrameIterator will assume we are executing C++ and miss the JS + // frames on top. + __ Sw(zero_reg, MemOperand(t4)); + + // Set up frame pointer for the frame to be pushed. + __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); + pushed_stack_space += EntryFrameConstants::kCallerFPOffset; + + // Registers: + // a0: root_register_value + // + // Stack: + // caller fp | + // function slot | entry frame + // context slot | + // bad fp (0xFF...F) | + // callee saved registers + ra + // 4 args slots + + // If this is the outermost JS call, set js_entry_sp value. + Label non_outermost_js; + ExternalReference js_entry_sp = ExternalReference::Create( + IsolateAddressId::kJSEntrySPAddress, masm->isolate()); + __ li(t1, js_entry_sp); + __ lw(t2, MemOperand(t1)); + __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg)); + __ sw(fp, MemOperand(t1)); + __ li(t0, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); + Label cont; + __ b(&cont); + __ nop(); // Branch delay slot nop. + __ bind(&non_outermost_js); + __ li(t0, Operand(StackFrame::INNER_JSENTRY_FRAME)); + __ bind(&cont); + __ push(t0); + + // Jump to a faked try block that does the invoke, with a faked catch + // block that sets the pending exception. + __ jmp(&invoke); + __ bind(&handler_entry); + + // Store the current pc as the handler offset. It's used later to create the + // handler table. + masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos()); + + // Caught exception: Store result (exception) in the pending exception + // field in the JSEnv and return a failure sentinel. Coming in here the + // fp will be invalid because the PushStackHandler below sets it to 0 to + // signal the existence of the JSEntry frame. + __ li(t0, ExternalReference::Create( + IsolateAddressId::kPendingExceptionAddress, masm->isolate())); + __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0. + __ LoadRoot(v0, RootIndex::kException); + __ b(&exit); // b exposes branch delay slot. + __ nop(); // Branch delay slot nop. + + // Invoke: Link this frame into the handler chain. + __ bind(&invoke); + __ PushStackHandler(); + // If an exception not caught by another handler occurs, this handler + // returns control to the code after the bal(&invoke) above, which + // restores all kCalleeSaved registers (including cp and fp) to their + // saved values before returning a failure to C. + // + // Preserve a1, a2 and a3 passed by C++ and pass them to the trampoline. + // + // Stack: + // handler frame + // entry frame + // callee saved registers + ra + // 4 args slots + // + // Invoke the function by calling through JS entry trampoline builtin and + // pop the faked function when we return. + Handle trampoline_code = + masm->isolate()->builtins()->code_handle(entry_trampoline); + DCHECK_EQ(kPushedStackSpace, pushed_stack_space); + USE(pushed_stack_space); + __ Call(trampoline_code, RelocInfo::CODE_TARGET); + + // Unlink this frame from the handler chain. 
+ __ PopStackHandler(); + + __ bind(&exit); // v0 holds result + // Check if the current stack frame is marked as the outermost JS frame. + Label non_outermost_js_2; + __ pop(t1); + __ Branch(&non_outermost_js_2, ne, t1, + Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); + __ li(t1, ExternalReference(js_entry_sp)); + __ sw(zero_reg, MemOperand(t1)); + __ bind(&non_outermost_js_2); + + // Restore the top frame descriptors from the stack. + __ pop(t1); + __ li(t0, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, + masm->isolate())); + __ sw(t1, MemOperand(t0)); + + // Reset the stack to the callee saved registers. + __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); + + // Restore callee-saved fpu registers. + __ MultiPopFPU(kCalleeSavedFPU); + + // Restore callee saved registers from the stack. + __ MultiPop(kCalleeSaved | ra); + // Return. + __ Jump(ra); +} + +} // namespace + +void Builtins::Generate_JSEntry(MacroAssembler* masm) { + Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline); +} + +void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) { + Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY, + Builtin::kJSConstructEntryTrampoline); +} + +void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) { + Generate_JSEntryVariant(masm, StackFrame::ENTRY, + Builtin::kRunMicrotasksTrampoline); +} + +static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, + bool is_construct) { + // ----------- S t a t e ------------- + // -- a0: root_register_value (unused) + // -- a1: new.target + // -- a2: function + // -- a3: receiver_pointer + // -- [fp + kPushedStackSpace + 0 * kPointerSize]: argc + // -- [fp + kPushedStackSpace + 1 * kPointerSize]: argv + // ----------------------------------- + + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Setup the context (we need to use the caller context from the isolate). + ExternalReference context_address = ExternalReference::Create( + IsolateAddressId::kContextAddress, masm->isolate()); + __ li(cp, context_address); + __ lw(cp, MemOperand(cp)); + + // Push the function onto the stack. + __ Push(a2); + + __ lw(s0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ lw(a0, + MemOperand(s0, kPushedStackSpace + EntryFrameConstants::kArgcOffset)); + __ lw(s0, + MemOperand(s0, kPushedStackSpace + EntryFrameConstants::kArgvOffset)); + + // Check if we have enough stack space to push all arguments. + // Clobbers a2 and t0. + __ mov(t1, a0); + Generate_CheckStackOverflow(masm, t1, t0, t2); + + // Copy arguments to the stack. + // a0: argc + // s0: argv, i.e. points to first arg + Generate_PushArguments(masm, s0, a0, t2, t0, ArgumentsElementType::kHandle); + + // Push the receiver. + __ Push(a3); + + // a0: argc + // a1: function + // a3: new.target + __ mov(a3, a1); + __ mov(a1, a2); + + // Initialize all JavaScript callee-saved registers, since they will be seen + // by the garbage collector as part of handlers. + __ LoadRoot(t0, RootIndex::kUndefinedValue); + __ mov(s0, t0); + __ mov(s1, t0); + __ mov(s2, t0); + __ mov(s3, t0); + __ mov(s4, t0); + __ mov(s5, t0); + // s6 holds the root address. Do not clobber. + // s7 is cp. Do not init. + + // Invoke the code. + Handle builtin = is_construct + ? BUILTIN_CODE(masm->isolate(), Construct) + : masm->isolate()->builtins()->Call(); + __ Call(builtin, RelocInfo::CODE_TARGET); + + // Leave internal frame. 
+ } + + __ Jump(ra); +} + +void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { + Generate_JSEntryTrampolineHelper(masm, false); +} + +void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { + Generate_JSEntryTrampolineHelper(masm, true); +} + +void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { + // a1: microtask_queue + __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1); + __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET); +} + +static void AssertCodeIsBaseline(MacroAssembler* masm, Register code, + Register scratch) { + DCHECK(!AreAliased(code, scratch)); + // Verify that the code kind is baseline code via the CodeKind. + __ lw(scratch, FieldMemOperand(code, Code::kFlagsOffset)); + __ DecodeField(scratch); + __ Assert(eq, AbortReason::kExpectedBaselineData, scratch, + Operand(static_cast(CodeKind::BASELINE))); +} + +static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, + Register sfi_data, + Register scratch1, + Label* is_baseline) { + ASM_CODE_COMMENT(masm); + Label done; + + __ GetObjectType(sfi_data, scratch1, scratch1); + if (FLAG_debug_code) { + Label not_baseline; + __ Branch(¬_baseline, ne, scratch1, Operand(CODET_TYPE)); + AssertCodeIsBaseline(masm, sfi_data, scratch1); + __ Branch(is_baseline); + __ bind(¬_baseline); + } else { + __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE)); + } + __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); + __ lw(sfi_data, + FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); + + __ bind(&done); +} + +// static +void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- v0 : the value to pass to the generator + // -- a1 : the JSGeneratorObject to resume + // -- ra : return address + // ----------------------------------- + + // Store input value into generator object. + __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset)); + __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3, + kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore); + + // Check that a1 is still valid, RecordWrite might have clobbered it. + __ AssertGeneratorObject(a1); + + // Load suspended function and context. + __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); + __ lw(cp, FieldMemOperand(t0, JSFunction::kContextOffset)); + + // Flood function if we are stepping. + Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; + Label stepping_prepared; + ExternalReference debug_hook = + ExternalReference::debug_hook_on_function_call_address(masm->isolate()); + __ li(t1, debug_hook); + __ lb(t1, MemOperand(t1)); + __ Branch(&prepare_step_in_if_stepping, ne, t1, Operand(zero_reg)); + + // Flood function if we need to continue stepping in the suspended generator. + ExternalReference debug_suspended_generator = + ExternalReference::debug_suspended_generator_address(masm->isolate()); + __ li(t1, debug_suspended_generator); + __ lw(t1, MemOperand(t1)); + __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(t1)); + __ bind(&stepping_prepared); + + // Check the stack for overflow. We are not trying to catch interruptions + // (i.e. debug break and preemption) here, so check the "real stack limit". 
+ Label stack_overflow; + __ LoadStackLimit(kScratchReg, + MacroAssembler::StackLimitKind::kRealStackLimit); + __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg)); + + // ----------- S t a t e ------------- + // -- a1 : the JSGeneratorObject to resume + // -- t0 : generator function + // -- cp : generator context + // -- ra : return address + // ----------------------------------- + + // Copy the function arguments from the generator object's register file. + + __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); + __ lhu(a3, + FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); + __ Subu(a3, a3, Operand(kJSArgcReceiverSlots)); + __ lw(t1, + FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset)); + { + Label done_loop, loop; + __ bind(&loop); + __ Subu(a3, a3, Operand(1)); + __ Branch(&done_loop, lt, a3, Operand(zero_reg)); + __ Lsa(kScratchReg, t1, a3, kPointerSizeLog2); + __ Lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize)); + __ Push(kScratchReg); + __ Branch(&loop); + __ bind(&done_loop); + // Push receiver. + __ Lw(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); + __ Push(kScratchReg); + } + + // Underlying function needs to have bytecode available. + if (FLAG_debug_code) { + Label is_baseline; + __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); + __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset)); + GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline); + __ GetObjectType(a3, a3, a3); + __ Assert(eq, AbortReason::kMissingBytecodeArray, a3, + Operand(BYTECODE_ARRAY_TYPE)); + __ bind(&is_baseline); + } + + // Resume (Ignition/TurboFan) generator object. + { + __ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); + __ lhu(a0, FieldMemOperand( + a0, SharedFunctionInfo::kFormalParameterCountOffset)); + // We abuse new.target both to indicate that this is a resume call and to + // pass in the generator object. In ordinary calls, new.target is always + // undefined because generator functions are non-constructable. + __ Move(a3, a1); + __ Move(a1, t0); + static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); + __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); + __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag); + __ Jump(a2); + } + + __ bind(&prepare_step_in_if_stepping); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a1, t0); + // Push hole as receiver since we do not use it for stepping. + __ PushRoot(RootIndex::kTheHoleValue); + __ CallRuntime(Runtime::kDebugOnFunctionCall); + __ Pop(a1); + } + __ Branch(USE_DELAY_SLOT, &stepping_prepared); + __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); + + __ bind(&prepare_step_in_suspended_generator); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a1); + __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator); + __ Pop(a1); + } + __ Branch(USE_DELAY_SLOT, &stepping_prepared); + __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); + + __ bind(&stack_overflow); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kThrowStackOverflow); + __ break_(0xCC); // This should be unreachable. + } +} + +static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, + Register optimized_code, + Register closure, + Register scratch1, + Register scratch2) { + ASM_CODE_COMMENT(masm); + // Store code entry in the closure. 
+ __ sw(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); + __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. + __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, + kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore, + RememberedSetAction::kOmit, SmiCheck::kOmit); +} + +static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, + Register scratch2) { + ASM_CODE_COMMENT(masm); + Register params_size = scratch1; + + // Get the size of the formal parameters + receiver (in bytes). + __ lw(params_size, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ lw(params_size, + FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset)); + + Register actual_params_size = scratch2; + // Compute the size of the actual parameters + receiver (in bytes). + __ Lw(actual_params_size, + MemOperand(fp, StandardFrameConstants::kArgCOffset)); + __ sll(actual_params_size, actual_params_size, kPointerSizeLog2); + + // If actual is bigger than formal, then we should use it to free up the stack + // arguments. + __ slt(t2, params_size, actual_params_size); + __ movn(params_size, actual_params_size, t2); + + // Leave the frame (also dropping the register file). + __ LeaveFrame(StackFrame::INTERPRETED); + + // Drop receiver + arguments. + __ DropArguments(params_size, TurboAssembler::kCountIsBytes, + TurboAssembler::kCountIncludesReceiver); +} + +// Tail-call |function_id| if |actual_state| == |expected_state| +static void TailCallRuntimeIfStateEquals(MacroAssembler* masm, + Register actual_state, + TieringState expected_state, + Runtime::FunctionId function_id) { + ASM_CODE_COMMENT(masm); + Label no_match; + __ Branch(&no_match, ne, actual_state, + Operand(static_cast(expected_state))); + GenerateTailCallToReturnedCode(masm, function_id); + __ bind(&no_match); +} + +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry, + Register scratch1, Register scratch2) { + // ----------- S t a t e ------------- + // -- a0 : actual argument count + // -- a3 : new target (preserved for callee if needed, and caller) + // -- a1 : target function (preserved for callee if needed, and caller) + // ----------------------------------- + DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2)); + + Register closure = a1; + Label heal_optimized_code_slot; + + // If the optimized code is cleared, go to runtime to update the optimization + // marker field. + __ LoadWeakValue(optimized_code_entry, optimized_code_entry, + &heal_optimized_code_slot); + + // Check if the optimized code is marked for deopt. If it is, call the + // runtime to clear it. + __ Lw(scratch1, + FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); + __ Lw(scratch1, + FieldMemOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset)); + __ And(scratch1, scratch1, Operand(1 << Code::kMarkedForDeoptimizationBit)); + __ Branch(&heal_optimized_code_slot, ne, scratch1, Operand(zero_reg)); + + // Optimized code is good, get it into the closure and link the closure into + // the optimized functions list, then tail call the optimized code. + // The feedback vector is no longer used, so re-use it as a scratch + // register. 
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, + scratch1, scratch2); + static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); + __ Addu(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag); + __ Jump(a2); + + // Optimized code slot contains deoptimized code or code is cleared and + // optimized code marker isn't updated. Evict the code, update the marker + // and re-enter the closure's code. + __ bind(&heal_optimized_code_slot); + GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot); +} + +static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, + Register tiering_state) { + // ----------- S t a t e ------------- + // -- a0 : actual argument count + // -- a3 : new target (preserved for callee if needed, and caller) + // -- a1 : target function (preserved for callee if needed, and caller) + // -- feedback vector (preserved for caller if needed) + // -- tiering_state : a int32 containing a non-zero optimization + // marker. + // ----------------------------------- + ASM_CODE_COMMENT(masm); + DCHECK(!AreAliased(feedback_vector, a1, a3, tiering_state)); + + TailCallRuntimeIfStateEquals(masm, tiering_state, + TieringState::kRequestTurbofan_Synchronous, + Runtime::kCompileTurbofan_Synchronous); + TailCallRuntimeIfStateEquals(masm, tiering_state, + TieringState::kRequestTurbofan_Concurrent, + Runtime::kCompileTurbofan_Concurrent); + + __ stop(); +} + +// Advance the current bytecode offset. This simulates what all bytecode +// handlers do upon completion of the underlying operation. Will bail out to a +// label if the bytecode (without prefix) is a return bytecode. Will not advance +// the bytecode offset if the current bytecode is a JumpLoop, instead just +// re-executing the JumpLoop to jump to the correct bytecode. +static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, + Register bytecode_array, + Register bytecode_offset, + Register bytecode, Register scratch1, + Register scratch2, Register scratch3, + Label* if_return) { + ASM_CODE_COMMENT(masm); + Register bytecode_size_table = scratch1; + + // The bytecode offset value will be increased by one in wide and extra wide + // cases. In the case of having a wide or extra wide JumpLoop bytecode, we + // will restore the original bytecode. In order to simplify the code, we have + // a backup of it. + Register original_bytecode_offset = scratch3; + DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode, + bytecode_size_table, original_bytecode_offset)); + __ Move(original_bytecode_offset, bytecode_offset); + __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address()); + + // Check if the bytecode is a Wide or ExtraWide prefix bytecode. + Label process_bytecode, extra_wide; + STATIC_ASSERT(0 == static_cast(interpreter::Bytecode::kWide)); + STATIC_ASSERT(1 == static_cast(interpreter::Bytecode::kExtraWide)); + STATIC_ASSERT(2 == static_cast(interpreter::Bytecode::kDebugBreakWide)); + STATIC_ASSERT(3 == + static_cast(interpreter::Bytecode::kDebugBreakExtraWide)); + __ Branch(&process_bytecode, hi, bytecode, Operand(3)); + __ And(scratch2, bytecode, Operand(1)); + __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg)); + + // Load the next bytecode and update table to the wide scaled table. 
+ __ Addu(bytecode_offset, bytecode_offset, Operand(1)); + __ Addu(scratch2, bytecode_array, bytecode_offset); + __ lbu(bytecode, MemOperand(scratch2)); + __ Addu(bytecode_size_table, bytecode_size_table, + Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount)); + __ jmp(&process_bytecode); + + __ bind(&extra_wide); + // Load the next bytecode and update table to the extra wide scaled table. + __ Addu(bytecode_offset, bytecode_offset, Operand(1)); + __ Addu(scratch2, bytecode_array, bytecode_offset); + __ lbu(bytecode, MemOperand(scratch2)); + __ Addu(bytecode_size_table, bytecode_size_table, + Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount)); + + __ bind(&process_bytecode); + +// Bailout to the return label if this is a return bytecode. +#define JUMP_IF_EQUAL(NAME) \ + __ Branch(if_return, eq, bytecode, \ + Operand(static_cast(interpreter::Bytecode::k##NAME))); + RETURN_BYTECODE_LIST(JUMP_IF_EQUAL) +#undef JUMP_IF_EQUAL + + // If this is a JumpLoop, re-execute it to perform the jump to the beginning + // of the loop. + Label end, not_jump_loop; + __ Branch(¬_jump_loop, ne, bytecode, + Operand(static_cast(interpreter::Bytecode::kJumpLoop))); + // We need to restore the original bytecode_offset since we might have + // increased it to skip the wide / extra-wide prefix bytecode. + __ Move(bytecode_offset, original_bytecode_offset); + __ jmp(&end); + + __ bind(¬_jump_loop); + // Otherwise, load the size of the current bytecode and advance the offset. + __ Addu(scratch2, bytecode_size_table, bytecode); + __ lb(scratch2, MemOperand(scratch2)); + __ Addu(bytecode_offset, bytecode_offset, scratch2); + + __ bind(&end); +} + +// Read off the optimization state in the feedback vector and check if there +// is optimized code or a tiering state that needs to be processed. +static void LoadTieringStateAndJumpIfNeedsProcessing( + MacroAssembler* masm, Register optimization_state, Register feedback_vector, + Label* has_optimized_code_or_state) { + ASM_CODE_COMMENT(masm); + Register scratch = t6; + __ Lw(optimization_state, + FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset)); + __ And( + scratch, optimization_state, + Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask)); + __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg)); +} + +static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( + MacroAssembler* masm, Register optimization_state, + Register feedback_vector) { + ASM_CODE_COMMENT(masm); + Label maybe_has_optimized_code; + // Check if optimized code marker is available + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ And(scratch, optimization_state, + Operand(FeedbackVector::kTieringStateIsAnyRequestMask)); + __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg)); + } + + Register tiering_state = optimization_state; + __ DecodeField(tiering_state); + MaybeOptimizeCode(masm, feedback_vector, tiering_state); + + __ bind(&maybe_has_optimized_code); + Register optimized_code_entry = optimization_state; + __ Lw(tiering_state, + FieldMemOperand(feedback_vector, + FeedbackVector::kMaybeOptimizedCodeOffset)); + + TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3); +} + +namespace { +void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, + Register bytecode_array) { + // Reset code age and the OSR state (optimized to a single write). 
+ static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits); + STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); + __ sw(zero_reg, + FieldMemOperand(bytecode_array, + BytecodeArray::kOsrUrgencyAndInstallTargetOffset)); +} + +} // namespace + +// static +void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { + UseScratchRegisterScope temps(masm); + temps.Include({s1, s2}); + auto descriptor = + Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); + Register closure = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kClosure); + // Load the feedback vector from the closure. + Register feedback_vector = temps.Acquire(); + __ Lw(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ Lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + if (FLAG_debug_code) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ GetObjectType(feedback_vector, scratch, scratch); + __ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch, + Operand(FEEDBACK_VECTOR_TYPE)); + } + // Check for an tiering state. + Label has_optimized_code_or_state; + Register optimization_state = no_reg; + { + UseScratchRegisterScope temps(masm); + optimization_state = temps.Acquire(); + // optimization_state will be used only in |has_optimized_code_or_state| + // and outside it can be reused. + LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state, + feedback_vector, + &has_optimized_code_or_state); + } + // Increment invocation count for the function. + { + UseScratchRegisterScope temps(masm); + Register invocation_count = temps.Acquire(); + __ Lw(invocation_count, + FieldMemOperand(feedback_vector, + FeedbackVector::kInvocationCountOffset)); + __ Addu(invocation_count, invocation_count, Operand(1)); + __ Sw(invocation_count, + FieldMemOperand(feedback_vector, + FeedbackVector::kInvocationCountOffset)); + } + + FrameScope frame_scope(masm, StackFrame::MANUAL); + { + ASM_CODE_COMMENT_STRING(masm, "Frame Setup"); + // Normally the first thing we'd do here is Push(ra, fp), but we already + // entered the frame in BaselineCompiler::Prologue, as we had to use the + // value ra before the call to this BaselineOutOfLinePrologue builtin. + Register callee_context = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kCalleeContext); + Register callee_js_function = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kClosure); + __ Push(callee_context, callee_js_function); + DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); + DCHECK_EQ(callee_js_function, kJSFunctionRegister); + + Register argc = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount); + // We'll use the bytecode for both code age/OSR resetting, and pushing onto + // the frame, so load it into a register. + Register bytecode_array = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); + ResetBytecodeAgeAndOsrState(masm, bytecode_array); + __ Push(argc, bytecode_array); + + // Baseline code frames store the feedback vector where interpreter would + // store the bytecode offset. 
+ if (FLAG_debug_code) { + UseScratchRegisterScope temps(masm); + Register invocation_count = temps.Acquire(); + __ GetObjectType(feedback_vector, invocation_count, invocation_count); + __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count, + Operand(FEEDBACK_VECTOR_TYPE)); + } + // Our stack is currently aligned. We have have to push something along with + // the feedback vector to keep it that way -- we may as well start + // initialising the register frame. + // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves + // `undefined` in the accumulator register, to skip the load in the baseline + // code. + __ Push(feedback_vector); + } + + Label call_stack_guard; + Register frame_size = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kStackFrameSize); + { + ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check"); + // Stack check. This folds the checks for both the interrupt stack limit + // check and the real stack limit into one by just checking for the + // interrupt limit. The interrupt limit is either equal to the real stack + // limit or tighter. By ensuring we have space until that limit after + // building the frame we can quickly precheck both at once. + UseScratchRegisterScope temps(masm); + Register sp_minus_frame_size = temps.Acquire(); + __ Subu(sp_minus_frame_size, sp, frame_size); + Register interrupt_limit = temps.Acquire(); + __ LoadStackLimit(interrupt_limit, + MacroAssembler::StackLimitKind::kInterruptStackLimit); + __ Branch(&call_stack_guard, Uless, sp_minus_frame_size, + Operand(interrupt_limit)); + } + + // Do "fast" return to the caller pc in ra. + // TODO(v8:11429): Document this frame setup better. + __ Ret(); + + __ bind(&has_optimized_code_or_state); + { + ASM_CODE_COMMENT_STRING(masm, "Optimized marker check"); + UseScratchRegisterScope temps(masm); + temps.Exclude(optimization_state); + // Ensure the optimization_state is not allocated again. + // Drop the frame created by the baseline call. + __ Pop(ra, fp); + MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state, + feedback_vector); + __ Trap(); + } + + __ bind(&call_stack_guard); + { + ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call"); + FrameScope frame_scope(masm, StackFrame::INTERNAL); + // Save incoming new target or generator + __ Push(kJavaScriptCallNewTargetRegister); + __ SmiTag(frame_size); + __ Push(frame_size); + __ CallRuntime(Runtime::kStackGuardWithGap); + __ Pop(kJavaScriptCallNewTargetRegister); + } + __ Ret(); + temps.Exclude({kScratchReg, kScratchReg2}); +} + +// Generate code for entering a JS function with the interpreter. +// On entry to the function the receiver and arguments have been pushed on the +// stack left to right. +// +// The live registers are: +// o a0 : actual argument count +// o a1: the JS function object being called. +// o a3: the incoming new target or generator object +// o cp: our context +// o fp: the caller's frame pointer +// o sp: stack pointer +// o ra: return address +// +// The function builds an interpreter frame. See InterpreterFrameConstants in +// frame-constants.h for its layout. +void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { + Register closure = a1; + Register feedback_vector = a2; + + // Get the bytecode array from the function object and load it into + // kInterpreterBytecodeArrayRegister. 
+  __ lw(kScratchReg,
+        FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(kInterpreterBytecodeArrayRegister,
+        FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
+  Label is_baseline;
+  GetSharedFunctionInfoBytecodeOrBaseline(
+      masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
+
+  // The bytecode array could have been flushed from the shared function info,
+  // if so, call into CompileLazy.
+  Label compile_lazy;
+  __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
+  __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
+
+  // Load the feedback vector from the closure.
+  __ lw(feedback_vector,
+        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+  Label push_stack_frame;
+  // Check if feedback vector is valid. If valid, check for optimized code
+  // and update invocation count. Otherwise, setup the stack frame.
+  __ lw(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+  __ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+  __ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
+
+  // Read off the optimization state in the feedback vector, and if there
+  // is optimized code or a tiering state, call that instead.
+  Register optimization_state = t0;
+  __ Lw(optimization_state,
+        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+  // Check if the optimized code slot is not empty or has a tiering state.
+  Label has_optimized_code_or_state;
+
+  __ andi(t1, optimization_state,
+          FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
+  __ Branch(&has_optimized_code_or_state, ne, t1, Operand(zero_reg));
+
+  Label not_optimized;
+  __ bind(&not_optimized);
+
+  // Increment invocation count for the function.
+  __ lw(t0, FieldMemOperand(feedback_vector,
+                            FeedbackVector::kInvocationCountOffset));
+  __ Addu(t0, t0, Operand(1));
+  __ sw(t0, FieldMemOperand(feedback_vector,
+                            FeedbackVector::kInvocationCountOffset));
+
+  // Open a frame scope to indicate that there is a frame on the stack. The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  __ bind(&push_stack_frame);
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ PushStandardFrame(closure);
+
+  ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
+
+  // Load initial bytecode offset.
+  __ li(kInterpreterBytecodeOffsetRegister,
+        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(t0, kInterpreterBytecodeOffsetRegister);
+  __ Push(kInterpreterBytecodeArrayRegister, t0);
+
+  // Allocate the local and temporary register file on the stack.
+  Label stack_overflow;
+  {
+    // Load frame size from the BytecodeArray object.
+    __ lw(t0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                              BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    __ Subu(t1, sp, Operand(t0));
+    __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
+    __ Branch(&stack_overflow, lo, t1, Operand(a2));
+
+    // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header; + Label loop_check; + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + __ Branch(&loop_check); + __ bind(&loop_header); + // TODO(rmcilroy): Consider doing more than one push per loop iteration. + __ push(kInterpreterAccumulatorRegister); + // Continue loop if not done. + __ bind(&loop_check); + __ Subu(t0, t0, Operand(kPointerSize)); + __ Branch(&loop_header, ge, t0, Operand(zero_reg)); + } + + // If the bytecode array has a valid incoming new target or generator object + // register, initialize it with incoming value which was passed in r3. + Label no_incoming_new_target_or_generator_register; + __ lw(t1, FieldMemOperand( + kInterpreterBytecodeArrayRegister, + BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset)); + __ Branch(&no_incoming_new_target_or_generator_register, eq, t1, + Operand(zero_reg)); + __ Lsa(t1, fp, t1, kPointerSizeLog2); + __ sw(a3, MemOperand(t1)); + __ bind(&no_incoming_new_target_or_generator_register); + + // Perform interrupt stack check. + // TODO(solanes): Merge with the real stack limit check above. + Label stack_check_interrupt, after_stack_check_interrupt; + __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kInterruptStackLimit); + __ Branch(&stack_check_interrupt, lo, sp, Operand(a2)); + __ bind(&after_stack_check_interrupt); + + // The accumulator is already loaded with undefined. + + // Load the dispatch table into a register and dispatch to the bytecode + // handler at the current bytecode offset. + Label do_dispatch; + __ bind(&do_dispatch); + __ li(kInterpreterDispatchTableRegister, + ExternalReference::interpreter_dispatch_table_address(masm->isolate())); + __ Addu(a0, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister); + __ lbu(t3, MemOperand(a0)); + __ Lsa(kScratchReg, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2); + __ lw(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg)); + __ Call(kJavaScriptCallCodeStartRegister); + masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset()); + + // Any returns to the entry trampoline are either due to the return bytecode + // or the interpreter tail calling a builtin and then a dispatch. + + // Get bytecode array and bytecode offset from the stack frame. + __ lw(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ lw(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ SmiUntag(kInterpreterBytecodeOffsetRegister); + // Either return, or advance to the next bytecode and dispatch. + Label do_return; + __ Addu(a1, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister); + __ lbu(a1, MemOperand(a1)); + AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister, a1, a2, a3, + t0, &do_return); + __ jmp(&do_dispatch); + + __ bind(&do_return); + // The return value is in v0. + LeaveInterpreterFrame(masm, t0, t1); + __ Jump(ra); + + __ bind(&stack_check_interrupt); + // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset + // for the call to the StackGuard. 
+ __ li(kInterpreterBytecodeOffsetRegister, + Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag + + kFunctionEntryBytecodeOffset))); + __ Sw(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ CallRuntime(Runtime::kStackGuard); + + // After the call, restore the bytecode array, bytecode offset and accumulator + // registers again. Also, restore the bytecode offset in the stack to its + // previous value. + __ Lw(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ li(kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + + __ SmiTag(a2, kInterpreterBytecodeOffsetRegister); + __ Sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + + __ jmp(&after_stack_check_interrupt); + + __ bind(&has_optimized_code_or_state); + MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state, + feedback_vector); + __ bind(&is_baseline); + { + // Load the feedback vector from the closure. + __ Lw(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ Lw(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label install_baseline_code; + // Check if feedback vector is valid. If not, call prepare for baseline to + // allocate it. + __ Lw(t4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ lhu(t4, FieldMemOperand(t4, Map::kInstanceTypeOffset)); + __ Branch(&install_baseline_code, ne, t4, Operand(FEEDBACK_VECTOR_TYPE)); + + // Check for an tiering state. + LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state, + feedback_vector, + &has_optimized_code_or_state); + + // Load the baseline code into the closure. + __ Move(a2, kInterpreterBytecodeArrayRegister); + static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); + ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t4, t5); + __ JumpCodeObject(a2); + + __ bind(&install_baseline_code); + GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode); + } + + __ bind(&compile_lazy); + GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); + // Unreachable code. + __ break_(0xCC); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); + // Unreachable code. + __ break_(0xCC); +} + +static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, + Register start_address, + Register scratch, Register scratch2) { + ASM_CODE_COMMENT(masm); + // Find the address of the last argument. + __ Subu(scratch, num_args, Operand(1)); + __ sll(scratch, scratch, kPointerSizeLog2); + __ Subu(start_address, start_address, scratch); + + // Push the arguments. + __ PushArray(start_address, num_args, scratch, scratch2, + TurboAssembler::PushArrayOrder::kReverse); +} + +// static +void Builtins::Generate_InterpreterPushArgsThenCallImpl( + MacroAssembler* masm, ConvertReceiverMode receiver_mode, + InterpreterPushArgsMode mode) { + DCHECK(mode != InterpreterPushArgsMode::kArrayFunction); + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a2 : the address of the first argument to be pushed. Subsequent + // arguments should be consecutive above this, in the same order as + // they are to be pushed onto the stack. + // -- a1 : the target to call (can be any Object). 
+ // ----------------------------------- + Label stack_overflow; + if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + // The spread argument should not be pushed. + __ Subu(a0, a0, Operand(1)); + } + + if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { + __ Subu(t0, a0, Operand(kJSArgcReceiverSlots)); + } else { + __ mov(t0, a0); + } + + __ StackOverflowCheck(t0, t4, t1, &stack_overflow); + + // This function modifies a2, t4 and t1. + GenerateInterpreterPushArgs(masm, t0, a2, t4, t1); + + if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { + __ PushRoot(RootIndex::kUndefinedValue); + } + + if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + // Pass the spread in the register a2. + // a2 already points to the penultime argument, the spread + // is below that. + __ Lw(a2, MemOperand(a2, -kSystemPointerSize)); + } + + // Call the target. + if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread), + RelocInfo::CODE_TARGET); + } else { + __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny), + RelocInfo::CODE_TARGET); + } + + __ bind(&stack_overflow); + { + __ TailCallRuntime(Runtime::kThrowStackOverflow); + // Unreachable code. + __ break_(0xCC); + } +} + +// static +void Builtins::Generate_InterpreterPushArgsThenConstructImpl( + MacroAssembler* masm, InterpreterPushArgsMode mode) { + // ----------- S t a t e ------------- + // -- a0 : argument count + // -- a3 : new target + // -- a1 : constructor to call + // -- a2 : allocation site feedback if available, undefined otherwise. + // -- t4 : address of the first argument + // ----------------------------------- + Label stack_overflow; + __ StackOverflowCheck(a0, t1, t0, &stack_overflow); + + if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + // The spread argument should not be pushed. + __ Subu(a0, a0, Operand(1)); + } + + Register argc_without_receiver = t2; + __ Subu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots)); + + GenerateInterpreterPushArgs(masm, argc_without_receiver, t4, t1, t0); + + // Push a slot for the receiver. + __ push(zero_reg); + + if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + // Pass the spread in the register a2. + // t4 already points to the penultimate argument, the spread + // lies in the next interpreter register. + // __ Subu(t4, t4, Operand(kSystemPointerSize)); + __ Lw(a2, MemOperand(t4, -kSystemPointerSize)); + } else { + __ AssertUndefinedOrAllocationSite(a2, t0); + } + + if (mode == InterpreterPushArgsMode::kArrayFunction) { + __ AssertFunction(a1); + + // Tail call to the array construct stub (still in the caller + // context at this point). + __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl), + RelocInfo::CODE_TARGET); + } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + // Call the constructor with a0, a1, and a3 unmodified. + __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread), + RelocInfo::CODE_TARGET); + } else { + DCHECK_EQ(InterpreterPushArgsMode::kOther, mode); + // Call the constructor with a0, a1, and a3 unmodified. + __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); + } + + __ bind(&stack_overflow); + { + __ TailCallRuntime(Runtime::kThrowStackOverflow); + // Unreachable code. + __ break_(0xCC); + } +} + +static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { + // Set the return address to the correct point in the interpreter entry + // trampoline. 
+ Label builtin_trampoline, trampoline_loaded; + Smi interpreter_entry_return_pc_offset( + masm->isolate()->heap()->interpreter_entry_return_pc_offset()); + DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero()); + + // If the SFI function_data is an InterpreterData, the function will have a + // custom copy of the interpreter entry trampoline for profiling. If so, + // get the custom trampoline, otherwise grab the entry address of the global + // trampoline. + __ lw(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + __ lw(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); + __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); + __ GetObjectType(t0, kInterpreterDispatchTableRegister, + kInterpreterDispatchTableRegister); + __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister, + Operand(INTERPRETER_DATA_TYPE)); + + __ lw(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset)); + __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Branch(&trampoline_loaded); + + __ bind(&builtin_trampoline); + __ li(t0, ExternalReference:: + address_of_interpreter_entry_trampoline_instruction_start( + masm->isolate())); + __ lw(t0, MemOperand(t0)); + + __ bind(&trampoline_loaded); + __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset.value())); + + // Initialize the dispatch table register. + __ li(kInterpreterDispatchTableRegister, + ExternalReference::interpreter_dispatch_table_address(masm->isolate())); + + // Get the bytecode array pointer from the frame. + __ lw(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + + if (FLAG_debug_code) { + // Check function data field is actually a BytecodeArray object. + __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg); + __ Assert(ne, + AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, + kScratchReg, Operand(zero_reg)); + __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1); + __ Assert(eq, + AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, + a1, Operand(BYTECODE_ARRAY_TYPE)); + } + + // Get the target bytecode offset from the frame. + __ lw(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ SmiUntag(kInterpreterBytecodeOffsetRegister); + + if (FLAG_debug_code) { + Label okay; + __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); + // Unreachable code. + __ break_(0xCC); + __ bind(&okay); + } + + // Dispatch to the target bytecode. + __ Addu(a1, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister); + __ lbu(t3, MemOperand(a1)); + __ Lsa(a1, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2); + __ lw(kJavaScriptCallCodeStartRegister, MemOperand(a1)); + __ Jump(kJavaScriptCallCodeStartRegister); +} + +void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { + // Advance the current bytecode offset stored within the given interpreter + // stack frame. This simulates what all bytecode handlers do upon completion + // of the underlying operation. 
+ __ lw(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ lw(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ SmiUntag(kInterpreterBytecodeOffsetRegister); + + Label enter_bytecode, function_entry_bytecode; + __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + + kFunctionEntryBytecodeOffset)); + + // Load the current bytecode. + __ Addu(a1, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister); + __ lbu(a1, MemOperand(a1)); + + // Advance to the next bytecode. + Label if_return; + AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister, a1, a2, a3, + t0, &if_return); + + __ bind(&enter_bytecode); + // Convert new bytecode offset to a Smi and save in the stackframe. + __ SmiTag(a2, kInterpreterBytecodeOffsetRegister); + __ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + + Generate_InterpreterEnterBytecode(masm); + + __ bind(&function_entry_bytecode); + // If the code deoptimizes during the implicit function entry stack interrupt + // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is + // not a valid bytecode offset. Detect this case and advance to the first + // actual bytecode. + __ li(kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); + __ Branch(&enter_bytecode); + + // We should never take the if_return path. + __ bind(&if_return); + __ Abort(AbortReason::kInvalidBytecodeAdvance); +} + +void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { + Generate_InterpreterEnterBytecode(masm); +} + +namespace { +void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, + bool java_script_builtin, + bool with_result) { + const RegisterConfiguration* config(RegisterConfiguration::Default()); + int allocatable_register_count = config->num_allocatable_general_registers(); + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); // Temp register is not allocatable. + // Register scratch = t3; + if (with_result) { + if (java_script_builtin) { + __ mov(scratch, v0); + } else { + // Overwrite the hole inserted by the deoptimizer with the return value + // from the LAZY deopt point. + __ sw(v0, + MemOperand( + sp, config->num_allocatable_general_registers() * kPointerSize + + BuiltinContinuationFrameConstants::kFixedFrameSize)); + } + } + for (int i = allocatable_register_count - 1; i >= 0; --i) { + int code = config->GetAllocatableGeneralCode(i); + __ Pop(Register::from_code(code)); + if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) { + __ SmiUntag(Register::from_code(code)); + } + } + + if (with_result && java_script_builtin) { + // Overwrite the hole inserted by the deoptimizer with the return value from + // the LAZY deopt point. t0 contains the arguments count, the return value + // from LAZY is always the last argument. + constexpr int return_value_offset = + BuiltinContinuationFrameConstants::kFixedSlotCount - + kJSArgcReceiverSlots; + __ Addu(a0, a0, Operand(return_value_offset)); + __ Lsa(t0, sp, a0, kSystemPointerSizeLog2); + __ Sw(scratch, MemOperand(t0)); + // Recover arguments count. 
+    __ Subu(a0, a0, Operand(return_value_offset));
+  }
+
+  __ lw(fp, MemOperand(
+                sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+  // Load builtin index (stored as a Smi) and use it to get the builtin start
+  // address from the builtins table.
+  __ Pop(t0);
+  __ Addu(sp, sp,
+          Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+  __ Pop(ra);
+  __ LoadEntryFromBuiltinIndex(t0);
+  __ Jump(t0);
+}
+}  // namespace
+
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+  Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+    MacroAssembler* masm) {
+  Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+  Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+    MacroAssembler* masm) {
+  Generate_ContinueToBuiltinHelper(masm, true, true);
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+  }
+
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
+  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+  __ Ret(USE_DELAY_SLOT);
+  // Safe to fill delay slot Addu will emit one instruction.
+  __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove accumulator.
+}
+
+namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+                       Operand offset = Operand(zero_reg)) {
+  __ Addu(ra, entry_address, offset);
+  // And "return" to the OSR entry point of the function.
+  __ Ret();
+}
+
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
+  }
+
+  // If the code object is null, just return to the caller.
+  __ Ret(eq, v0, Operand(Smi::zero()));
+
+  if (is_interpreter) {
+    // Drop the handler frame that is sitting on top of the actual
+    // JavaScript frame. This is the case when OSR is triggered from bytecode.
+    __ LeaveFrame(StackFrame::STUB);
+  }
+  // Load deoptimization data from the code object.
+  // <deopt_data> = <code>[#deoptimization_data_offset]
+  __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+                               kHeapObjectTag));
+
+  // Load the OSR entrypoint offset from the deoptimization data.
+  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+  __ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+                               DeoptimizationData::kOsrPcOffsetIndex) -
+                               kHeapObjectTag));
+  __ SmiUntag(a1);
+
+  // Compute the target address = code_obj + header_size + osr_offset
+  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+  __ Addu(v0, v0, a1);
+  Generate_OSREntry(masm, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+}  // namespace
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+  return OnStackReplacement(masm, true);
+}
+
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+  __ Lw(kContextRegister,
+        MemOperand(fp, StandardFrameConstants::kContextOffset));
+  return OnStackReplacement(masm, false);
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : argc
+  //  -- sp[0] : receiver
+  //  -- sp[4] : thisArg
+  //  -- sp[8] : argArray
+  // -----------------------------------
+
+  // 1. Load receiver into a1, argArray into a2 (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
+  {
+    Label no_arg;
+    __ LoadRoot(a2, RootIndex::kUndefinedValue);
+    __ mov(a3, a2);
+    // Lsa() cannot be used here as the scratch value is used later.
+    __ lw(a1, MemOperand(sp));  // receiver
+    __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0)));
+    __ lw(a3, MemOperand(sp, kSystemPointerSize));  // thisArg
+    __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1)));
+    __ lw(a2, MemOperand(sp, 2 * kSystemPointerSize));  // argArray
+    __ bind(&no_arg);
+    __ DropArgumentsAndPushNewReceiver(a0, a3, TurboAssembler::kCountIsInteger,
+                                       TurboAssembler::kCountIncludesReceiver);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- a2    : argArray
+  //  -- a1    : receiver
+  //  -- sp[0] : thisArg
+  // -----------------------------------
+
+  // 2. We don't need to check explicitly for callable receiver here,
+  // since that's the first thing the Call/CallWithArrayLike builtins
+  // will do.
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ JumpIfRoot(a2, RootIndex::kNullValue, &no_arguments);
+  __ JumpIfRoot(a2, RootIndex::kUndefinedValue, &no_arguments);
+
+  // 4a. Apply the receiver to the given argArray.
+  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
+          RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver.
+  __ bind(&no_arguments);
+  {
+    __ li(a0, JSParameterCount(0));
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+  // 1. Get the callable to call (passed as receiver) from the stack.
+  __ Pop(a1);
+
+  // 2. Make sure we have at least one argument.
+  // a0: actual number of arguments
+  {
+    Label done;
+    __ Branch(&done, ne, a0, Operand(JSParameterCount(0)));
+    __ PushRoot(RootIndex::kUndefinedValue);
+    __ Addu(a0, a0, Operand(1));
+    __ bind(&done);
+  }
+
+  // 3. Adjust the actual number of arguments.
+  __ addiu(a0, a0, -1);
+
+  // 4. Call the callable.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : argc
+  //  -- sp[0]  : receiver
+  //  -- sp[4]  : target (if argc >= 1)
+  //  -- sp[8]  : thisArgument (if argc >= 2)
+  //  -- sp[12] : argumentsList (if argc == 3)
+  // -----------------------------------
+
+  // 1. Load target into a1 (if present), argumentsList into a2 (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
+ { + Label no_arg; + __ LoadRoot(a1, RootIndex::kUndefinedValue); + __ mov(a2, a1); + __ mov(a3, a1); + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0))); + __ lw(a1, MemOperand(sp, kSystemPointerSize)); // target + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1))); + __ lw(a3, MemOperand(sp, 2 * kSystemPointerSize)); // thisArgument + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(2))); + __ lw(a2, MemOperand(sp, 3 * kSystemPointerSize)); // argumentsList + __ bind(&no_arg); + __ DropArgumentsAndPushNewReceiver(a0, a3, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountIncludesReceiver); + } + + // ----------- S t a t e ------------- + // -- a2 : argumentsList + // -- a1 : target + // -- sp[0] : thisArgument + // ----------------------------------- + + // 2. We don't need to check explicitly for callable target here, + // since that's the first thing the Call/CallWithArrayLike builtins + // will do. + + // 3. Apply the target to the given argumentsList. + __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), + RelocInfo::CODE_TARGET); +} + +void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : argc + // -- sp[0] : receiver + // -- sp[4] : target + // -- sp[8] : argumentsList + // -- sp[12] : new.target (optional) + // ----------------------------------- + + // 1. Load target into a1 (if present), argumentsList into a2 (if present), + // new.target into a3 (if present, otherwise use target), remove all + // arguments from the stack (including the receiver), and push thisArgument + // (if present) instead. + { + Label no_arg; + __ LoadRoot(a1, RootIndex::kUndefinedValue); + __ mov(a2, a1); + __ mov(t0, a1); + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0))); + __ lw(a1, MemOperand(sp, kSystemPointerSize)); // target + __ mov(a3, a1); // new.target defaults to target + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1))); + __ lw(a2, MemOperand(sp, 2 * kSystemPointerSize)); // argumentsList + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(2))); + __ lw(a3, MemOperand(sp, 3 * kSystemPointerSize)); // new.target + __ bind(&no_arg); + __ DropArgumentsAndPushNewReceiver(a0, t0, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountIncludesReceiver); + } + + // ----------- S t a t e ------------- + // -- a2 : argumentsList + // -- a3 : new.target + // -- a1 : target + // -- sp[0] : receiver (undefined) + // ----------------------------------- + + // 2. We don't need to check explicitly for constructor target here, + // since that's the first thing the Construct/ConstructWithArrayLike + // builtins will do. + + // 3. We don't need to check explicitly for constructor new.target here, + // since that's the second thing the Construct/ConstructWithArrayLike + // builtins will do. + + // 4. Construct the target with the given new.target and argumentsList. + __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike), + RelocInfo::CODE_TARGET); +} + +namespace { + +// Allocate new stack space for |count| arguments and shift all existing +// arguments already on the stack. |pointer_to_new_space_out| points to the +// first free slot on the stack to copy additional arguments to and +// |argc_in_out| is updated to include |count|. 
+void Generate_AllocateSpaceAndShiftExistingArguments(
+    MacroAssembler* masm, Register count, Register argc_in_out,
+    Register pointer_to_new_space_out, Register scratch1, Register scratch2,
+    Register scratch3) {
+  DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+                     scratch2));
+  Register old_sp = scratch1;
+  Register new_space = scratch2;
+  __ mov(old_sp, sp);
+  __ sll(new_space, count, kPointerSizeLog2);
+  __ Subu(sp, sp, Operand(new_space));
+
+  Register end = scratch2;
+  Register value = scratch3;
+  Register dest = pointer_to_new_space_out;
+  __ mov(dest, sp);
+  __ Lsa(end, old_sp, argc_in_out, kSystemPointerSizeLog2);
+  Label loop, done;
+  __ Branch(&done, ge, old_sp, Operand(end));
+  __ bind(&loop);
+  __ lw(value, MemOperand(old_sp, 0));
+  __ sw(value, MemOperand(dest, 0));
+  __ Addu(old_sp, old_sp, Operand(kSystemPointerSize));
+  __ Addu(dest, dest, Operand(kSystemPointerSize));
+  __ Branch(&loop, lt, old_sp, Operand(end));
+  __ bind(&done);
+
+  // Update total number of arguments.
+  __ Addu(argc_in_out, argc_in_out, count);
+}
+
+}  // namespace
+
+// static
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+                                               Handle<Code> code) {
+  // ----------- S t a t e -------------
+  //  -- a1 : target
+  //  -- a0 : number of parameters on the stack
+  //  -- a2 : arguments list (a FixedArray)
+  //  -- t0 : len (number of elements to push from args)
+  //  -- a3 : new.target (for [[Construct]])
+  // -----------------------------------
+  if (FLAG_debug_code) {
+    // Allow a2 to be a FixedArray, or a FixedDoubleArray if t0 == 0.
+    Label ok, fail;
+    __ AssertNotSmi(a2);
+    __ GetObjectType(a2, t8, t8);
+    __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
+    __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+    __ Branch(&ok, eq, t0, Operand(0));
+    // Fall through.
+    __ bind(&fail);
+    __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+    __ bind(&ok);
+  }
+
+  // Check for stack overflow.
+  Label stack_overflow;
+  __ StackOverflowCheck(t0, kScratchReg, t1, &stack_overflow);
+
+  // Move the arguments already in the stack,
+  // including the receiver and the return address.
+  // t0: Number of arguments to make room for.
+  // a0: Number of arguments already on the stack.
+  // t4: Points to first free slot on the stack after arguments were shifted.
+  Generate_AllocateSpaceAndShiftExistingArguments(masm, t0, a0, t4, t3, t1, t2);
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
+    __ mov(t2, zero_reg);
+    Label done, push, loop;
+    __ LoadRoot(t1, RootIndex::kTheHoleValue);
+    __ bind(&loop);
+    __ Branch(&done, eq, t2, Operand(t0));
+    __ Lsa(kScratchReg, a2, t2, kPointerSizeLog2);
+    __ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+    __ Addu(t2, t2, Operand(1));
+    __ Branch(&push, ne, t1, Operand(kScratchReg));
+    __ LoadRoot(kScratchReg, RootIndex::kUndefinedValue);
+    __ bind(&push);
+    __ Sw(kScratchReg, MemOperand(t4, 0));
+    __ Addu(t4, t4, Operand(kSystemPointerSize));
+    __ Branch(&loop);
+    __ bind(&done);
+  }
+
+  // Tail-call to the actual Call or Construct builtin.
+  __ Jump(code, RelocInfo::CODE_TARGET);
+
+  __ bind(&stack_overflow);
+  __ TailCallRuntime(Runtime::kThrowStackOverflow);
+}
+
+// static
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+                                                      CallOrConstructMode mode,
+                                                      Handle<Code> code) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments
+  //  -- a3 : the new.target (for [[Construct]] calls)
+  //  -- a1 : the target to call (can be any Object)
+  //  -- a2 : start index (to support rest parameters)
+  // -----------------------------------
+
+  // Check if new.target has a [[Construct]] internal method.
+  if (mode == CallOrConstructMode::kConstruct) {
+    Label new_target_constructor, new_target_not_constructor;
+    __ JumpIfSmi(a3, &new_target_not_constructor);
+    __ lw(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+    __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
+    __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
+    __ bind(&new_target_not_constructor);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ EnterFrame(StackFrame::INTERNAL);
+      __ Push(a3);
+      __ CallRuntime(Runtime::kThrowNotConstructor);
+    }
+    __ bind(&new_target_constructor);
+  }
+
+  Label stack_done, stack_overflow;
+  __ Lw(t2, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+  __ Subu(t2, t2, Operand(kJSArgcReceiverSlots));
+  __ Subu(t2, t2, a2);
+  __ Branch(&stack_done, le, t2, Operand(zero_reg));
+  {
+    // Check for stack overflow.
+    __ StackOverflowCheck(t2, t0, t1, &stack_overflow);
+
+    // Forward the arguments from the caller frame.
+    // Point to the first argument to copy (skipping the receiver).
+    __ Addu(t3, fp,
+            Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
+                    kSystemPointerSize));
+    __ Lsa(t3, t3, a2, kSystemPointerSizeLog2);
+
+    // Move the arguments already in the stack,
+    // including the receiver and the return address.
+    // t2: Number of arguments to make room for.
+    // a0: Number of arguments already on the stack.
+    // a2: Points to first free slot on the stack after arguments were shifted.
+    Generate_AllocateSpaceAndShiftExistingArguments(masm, t2, a0, a2, t5, t6,
+                                                    t7);
+
+    // Copy arguments from the caller frame.
+    // TODO(victorgomes): Consider using forward order as potentially more cache
+    // friendly.
+    {
+      Label loop;
+      __ bind(&loop);
+      {
+        __ Subu(t2, t2, Operand(1));
+        __ Lsa(kScratchReg, t3, t2, kPointerSizeLog2);
+        __ lw(kScratchReg, MemOperand(kScratchReg));
+        __ Lsa(t0, a2, t2, kPointerSizeLog2);
+        __ Sw(kScratchReg, MemOperand(t0));
+        __ Branch(&loop, ne, t2, Operand(zero_reg));
+      }
+    }
+  }
+  __ Branch(&stack_done);
+  __ bind(&stack_overflow);
+  __ TailCallRuntime(Runtime::kThrowStackOverflow);
+  __ bind(&stack_done);
+
+  // Tail-call to the {code} handler.
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments
+  //  -- a1 : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertCallableFunction(a1);
+
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + // We need to convert the receiver for non-native sloppy mode functions. + Label done_convert; + __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); + __ And(kScratchReg, a3, + Operand(SharedFunctionInfo::IsNativeBit::kMask | + SharedFunctionInfo::IsStrictBit::kMask)); + __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg)); + { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the function to call (checked to be a JSFunction) + // -- a2 : the shared function info. + // -- cp : the function context. + // ----------------------------------- + + if (mode == ConvertReceiverMode::kNullOrUndefined) { + // Patch receiver to global proxy. + __ LoadGlobalProxy(a3); + } else { + Label convert_to_object, convert_receiver; + __ LoadReceiver(a3, a0); + __ JumpIfSmi(a3, &convert_to_object); + STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); + __ GetObjectType(a3, t0, t0); + __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE)); + if (mode != ConvertReceiverMode::kNotNullOrUndefined) { + Label convert_global_proxy; + __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy); + __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object); + __ bind(&convert_global_proxy); + { + // Patch receiver to global proxy. + __ LoadGlobalProxy(a3); + } + __ Branch(&convert_receiver); + } + __ bind(&convert_to_object); + { + // Convert receiver using ToObject. + // TODO(bmeurer): Inline the allocation here to avoid building the frame + // in the fast case? (fall back to AllocateInNewSpace?) + FrameScope scope(masm, StackFrame::INTERNAL); + __ sll(a0, a0, kSmiTagSize); // Smi tagged. + __ Push(a0, a1); + __ mov(a0, a3); + __ Push(cp); + __ Call(BUILTIN_CODE(masm->isolate(), ToObject), + RelocInfo::CODE_TARGET); + __ Pop(cp); + __ mov(a3, v0); + __ Pop(a0, a1); + __ sra(a0, a0, kSmiTagSize); // Un-tag. + } + __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ bind(&convert_receiver); + } + __ StoreReceiver(a3, a0, kScratchReg); + } + __ bind(&done_convert); + + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the function to call (checked to be a JSFunction) + // -- a2 : the shared function info. + // -- cp : the function context. + // ----------------------------------- + + __ lhu(a2, + FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); + __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump); +} + +// static +void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the function to call (checked to be a JSBoundFunction) + // ----------------------------------- + __ AssertBoundFunction(a1); + + // Patch the receiver to [[BoundThis]]. + { + __ lw(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset)); + __ StoreReceiver(t0, a0, kScratchReg); + } + + // Load [[BoundArguments]] into a2 and length of that into t0. 
+ __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); + __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset)); + __ SmiUntag(t0); + + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the function to call (checked to be a JSBoundFunction) + // -- a2 : the [[BoundArguments]] (implemented as FixedArray) + // -- t0 : the number of [[BoundArguments]] + // ----------------------------------- + + // Reserve stack space for the [[BoundArguments]]. + { + Label done; + __ sll(t1, t0, kPointerSizeLog2); + __ Subu(t1, sp, Operand(t1)); + // Check the stack for overflow. We are not trying to catch interruptions + // (i.e. debug break and preemption) here, so check the "real stack limit". + __ LoadStackLimit(kScratchReg, + MacroAssembler::StackLimitKind::kRealStackLimit); + __ Branch(&done, hs, t1, Operand(kScratchReg)); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterFrame(StackFrame::INTERNAL); + __ CallRuntime(Runtime::kThrowStackOverflow); + } + __ bind(&done); + } + + // Pop receiver. + __ Pop(t1); + + // Push [[BoundArguments]]. + { + Label loop, done_loop; + __ Addu(a0, a0, Operand(t0)); + __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ bind(&loop); + __ Subu(t0, t0, Operand(1)); + __ Branch(&done_loop, lt, t0, Operand(zero_reg)); + __ Lsa(kScratchReg, a2, t0, kPointerSizeLog2); + __ Lw(kScratchReg, MemOperand(kScratchReg)); + __ Push(kScratchReg); + __ Branch(&loop); + __ bind(&done_loop); + } + + // Push receiver. + __ Push(t1); + + // Call the [[BoundTargetFunction]] via the Call builtin. + __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); + __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), + RelocInfo::CODE_TARGET); +} + +// static +void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the target to call (can be any Object). + // ----------------------------------- + + Register argc = a0; + Register target = a1; + Register map = t1; + Register instance_type = t2; + Register scratch = t8; + DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); + + Label non_callable, class_constructor; + __ JumpIfSmi(target, &non_callable); + __ LoadMap(map, target); + __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE, + scratch); + __ Jump(masm->isolate()->builtins()->CallFunction(mode), + RelocInfo::CODE_TARGET, ls, scratch, + Operand(LAST_CALLABLE_JS_FUNCTION_TYPE - + FIRST_CALLABLE_JS_FUNCTION_TYPE)); + __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction), + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_BOUND_FUNCTION_TYPE)); + + // Check if target has a [[Call]] internal method. 
+ { + Register flags = t1; + __ lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); + map = no_reg; + __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask)); + __ Branch(&non_callable, eq, flags, Operand(zero_reg)); + } + + // Check if target is a proxy and call CallProxy external builtin + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq, + instance_type, Operand(JS_PROXY_TYPE)); + + // Check if target is a wrapped function and call CallWrappedFunction external + // builtin + __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction), + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_WRAPPED_FUNCTION_TYPE)); + + // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) + // Check that the function is not a "classConstructor". + __ Branch(&class_constructor, eq, instance_type, + Operand(JS_CLASS_CONSTRUCTOR_TYPE)); + + // 2. Call to something else, which might have a [[Call]] internal method (if + // not we raise an exception). + // Overwrite the original receiver with the (original) target. + __ StoreReceiver(target, argc, kScratchReg); + // Let the "call_as_function_delegate" take care of the rest. + __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX); + __ Jump(masm->isolate()->builtins()->CallFunction( + ConvertReceiverMode::kNotNullOrUndefined), + RelocInfo::CODE_TARGET); + + // 3. Call to something that is not callable. + __ bind(&non_callable); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(target); + __ CallRuntime(Runtime::kThrowCalledNonCallable); + } + + // 4. The function is a "classConstructor", need to raise an exception. + __ bind(&class_constructor); + { + FrameScope frame(masm, StackFrame::INTERNAL); + __ Push(target); + __ CallRuntime(Runtime::kThrowConstructorNonCallableError); + } +} + +// static +void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the constructor to call (checked to be a JSFunction) + // -- a3 : the new target (checked to be a constructor) + // ----------------------------------- + __ AssertConstructor(a1); + __ AssertFunction(a1); + + // Calling convention for function specific ConstructStubs require + // a2 to contain either an AllocationSite or undefined. + __ LoadRoot(a2, RootIndex::kUndefinedValue); + + Label call_generic_stub; + + // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. + __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFlagsOffset)); + __ And(t0, t0, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask)); + __ Branch(&call_generic_stub, eq, t0, Operand(zero_reg)); + + __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub), + RelocInfo::CODE_TARGET); + + __ bind(&call_generic_stub); + __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric), + RelocInfo::CODE_TARGET); +} + +// static +void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the function to call (checked to be a JSBoundFunction) + // -- a3 : the new target (checked to be a constructor) + // ----------------------------------- + __ AssertConstructor(a1); + __ AssertBoundFunction(a1); + + // Load [[BoundArguments]] into a2 and length of that into t0. 
+ __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); + __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset)); + __ SmiUntag(t0); + + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the function to call (checked to be a JSBoundFunction) + // -- a2 : the [[BoundArguments]] (implemented as FixedArray) + // -- a3 : the new target (checked to be a constructor) + // -- t0 : the number of [[BoundArguments]] + // ----------------------------------- + + // Reserve stack space for the [[BoundArguments]]. + { + Label done; + __ sll(t1, t0, kPointerSizeLog2); + __ Subu(t1, sp, Operand(t1)); + // Check the stack for overflow. We are not trying to catch interruptions + // (i.e. debug break and preemption) here, so check the "real stack limit". + __ LoadStackLimit(kScratchReg, + MacroAssembler::StackLimitKind::kRealStackLimit); + __ Branch(&done, hs, t1, Operand(kScratchReg)); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterFrame(StackFrame::INTERNAL); + __ CallRuntime(Runtime::kThrowStackOverflow); + } + __ bind(&done); + } + + // Pop receiver + __ Pop(t1); + + // Push [[BoundArguments]]. + { + Label loop, done_loop; + __ Addu(a0, a0, Operand(t0)); + __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ bind(&loop); + __ Subu(t0, t0, Operand(1)); + __ Branch(&done_loop, lt, t0, Operand(zero_reg)); + __ Lsa(kScratchReg, a2, t0, kPointerSizeLog2); + __ Lw(kScratchReg, MemOperand(kScratchReg)); + __ Push(kScratchReg); + __ Branch(&loop); + __ bind(&done_loop); + } + + // Push receiver. + __ Push(t1); + + // Patch new.target to [[BoundTargetFunction]] if new.target equals target. + { + Label skip_load; + __ Branch(&skip_load, ne, a1, Operand(a3)); + __ lw(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); + __ bind(&skip_load); + } + + // Construct the [[BoundTargetFunction]] via the Construct builtin. + __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); + __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); +} + +// static +void Builtins::Generate_Construct(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the constructor to call (can be any Object) + // -- a3 : the new target (either the same as the constructor or + // the JSFunction on which new was invoked initially) + // ----------------------------------- + + Register argc = a0; + Register target = a1; + Register map = t1; + Register instance_type = t2; + Register scratch = t8; + DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); + + // Check if target is a Smi. + Label non_constructor, non_proxy; + __ JumpIfSmi(target, &non_constructor); + + // Check if target has a [[Construct]] internal method. + __ lw(map, FieldMemOperand(target, HeapObject::kMapOffset)); + { + Register flags = t3; + __ lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask)); + __ Branch(&non_constructor, eq, flags, Operand(zero_reg)); + } + + // Dispatch based on instance type. + __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch); + __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction), + RelocInfo::CODE_TARGET, ls, scratch, + Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE)); + + // Only dispatch to bound functions after checking whether they are + // constructors. 
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction), + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_BOUND_FUNCTION_TYPE)); + + // Only dispatch to proxies after checking whether they are constructors. + __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE)); + __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), + RelocInfo::CODE_TARGET); + + // Called Construct on an exotic Object with a [[Construct]] internal method. + __ bind(&non_proxy); + { + // Overwrite the original receiver with the (original) target. + __ StoreReceiver(target, argc, kScratchReg); + // Let the "call_as_constructor_delegate" take care of the rest. + __ LoadNativeContextSlot(target, + Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX); + __ Jump(masm->isolate()->builtins()->CallFunction(), + RelocInfo::CODE_TARGET); + } + + // Called Construct on an Object that doesn't have a [[Construct]] internal + // method. + __ bind(&non_constructor); + __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable), + RelocInfo::CODE_TARGET); +} + +#if V8_ENABLE_WEBASSEMBLY +void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { + // The function index was put in t0 by the jump table trampoline. + // Convert to Smi for the runtime call. + __ SmiTag(kWasmCompileLazyFuncIndexRegister); + + // Compute register lists for parameters to be saved. We save all parameter + // registers (see wasm-linkage.h). They might be overwritten in the runtime + // call below. We don't have any callee-saved registers in wasm, so no need to + // store anything else. + constexpr RegList kSavedGpRegs = ([]() constexpr { + RegList saved_gp_regs; + for (Register gp_param_reg : wasm::kGpParamRegisters) { + saved_gp_regs.set(gp_param_reg); + } + + // All set registers were unique. + CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters)); + // The Wasm instance must be part of the saved registers. + CHECK(saved_gp_regs.has(kWasmInstanceRegister)); + CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, + saved_gp_regs.Count()); + return saved_gp_regs; + })(); + + constexpr DoubleRegList kSavedFpRegs = ([]() constexpr { + DoubleRegList saved_fp_regs; + for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) { + saved_fp_regs.set(fp_param_reg); + } + + CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); + CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs, + saved_fp_regs.Count()); + return saved_fp_regs; + })(); + + { + HardAbortScope hard_abort(masm); // Avoid calls to Abort. + FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); + + // Save registers that we need to keep alive across the runtime call. + __ MultiPush(kSavedGpRegs); + __ MultiPushFPU(kSavedFpRegs); + + // Pass instance and function index as an explicit arguments to the runtime + // function. + __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister); + // Initialize the JavaScript context with 0. CEntry will use it to + // set the current context on the isolate. + __ Move(kContextRegister, Smi::zero()); + __ CallRuntime(Runtime::kWasmCompileLazy, 2); + + // Restore registers. + __ MultiPopFPU(kSavedFpRegs); + __ MultiPop(kSavedGpRegs); + } + + // Untag the returned Smi, for later use. + static_assert(!kSavedGpRegs.has(v0)); + __ SmiUntag(v0); + + // The runtime function returned the jump table slot offset as a Smi (now in + // t8). Use that to compute the jump target. 
+ static_assert(!kSavedGpRegs.has(t8)); + __ Lw(t8, + MemOperand(kWasmInstanceRegister, + WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag)); + __ Addu(t8, v0, t8); + + // Finally, jump to the jump table slot for the function. + __ Jump(t8); +} + +void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { + HardAbortScope hard_abort(masm); // Avoid calls to Abort. + { + FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK); + + // Save all parameter registers. They might hold live values, we restore + // them after the runtime call. + __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs); + __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); + + // Initialize the JavaScript context with 0. CEntry will use it to + // set the current context on the isolate. + __ Move(cp, Smi::zero()); + __ CallRuntime(Runtime::kWasmDebugBreak, 0); + + // Restore registers. + __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); + __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs); + } + __ Ret(); +} + +void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { + __ Trap(); +} + +void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + +void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + +void Builtins::Generate_WasmResume(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + +void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { + // Only needed on x64. + __ Trap(); +} + +#endif // V8_ENABLE_WEBASSEMBLY + +void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, + SaveFPRegsMode save_doubles, ArgvMode argv_mode, + bool builtin_exit_frame) { + // Called from JavaScript; parameters are on stack as if calling JS function + // a0: number of arguments including receiver + // a1: pointer to builtin function + // fp: frame pointer (restored after C call) + // sp: stack pointer (restored as callee's sp after C call) + // cp: current context (C callee-saved) + // + // If argv_mode == ArgvMode::kRegister: + // a2: pointer to the first argument + + if (argv_mode == ArgvMode::kRegister) { + // Move argv into the correct register. + __ mov(s1, a2); + } else { + // Compute the argv pointer in a callee-saved register. + __ Lsa(s1, sp, a0, kPointerSizeLog2); + __ Subu(s1, s1, kPointerSize); + } + + // Enter the exit frame that transitions from JavaScript to C++. + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterExitFrame( + save_doubles == SaveFPRegsMode::kSave, 0, + builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); + + // s0: number of arguments including receiver (C callee-saved) + // s1: pointer to first argument (C callee-saved) + // s2: pointer to builtin function (C callee-saved) + + // Prepare arguments for C routine. + // a0 = argc + __ mov(s0, a0); + __ mov(s2, a1); + + // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We + // also need to reserve the 4 argument slots on the stack. + + __ AssertStackIsAligned(); + + // a0 = argc, a1 = argv, a2 = isolate + __ li(a2, ExternalReference::isolate_address(masm->isolate())); + __ mov(a1, s1); + + __ StoreReturnAddressAndCall(s2); + + // Result returned in v0 or v1:v0 - do not destroy these registers! + + // Check result for exception sentinel. 
+ Label exception_returned; + __ LoadRoot(t0, RootIndex::kException); + __ Branch(&exception_returned, eq, t0, Operand(v0)); + + // Check that there is no pending exception, otherwise we + // should have returned the exception sentinel. + if (FLAG_debug_code) { + Label okay; + ExternalReference pending_exception_address = ExternalReference::Create( + IsolateAddressId::kPendingExceptionAddress, masm->isolate()); + __ li(a2, pending_exception_address); + __ lw(a2, MemOperand(a2)); + __ LoadRoot(t0, RootIndex::kTheHoleValue); + // Cannot use check here as it attempts to generate call into runtime. + __ Branch(&okay, eq, t0, Operand(a2)); + __ stop(); + __ bind(&okay); + } + + // Exit C frame and return. + // v0:v1: result + // sp: stack pointer + // fp: frame pointer + Register argc = argv_mode == ArgvMode::kRegister + // We don't want to pop arguments so set argc to no_reg. + ? no_reg + // s0: still holds argc (callee-saved). + : s0; + __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN); + + // Handling of exception. + __ bind(&exception_returned); + + ExternalReference pending_handler_context_address = ExternalReference::Create( + IsolateAddressId::kPendingHandlerContextAddress, masm->isolate()); + ExternalReference pending_handler_entrypoint_address = + ExternalReference::Create( + IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate()); + ExternalReference pending_handler_fp_address = ExternalReference::Create( + IsolateAddressId::kPendingHandlerFPAddress, masm->isolate()); + ExternalReference pending_handler_sp_address = ExternalReference::Create( + IsolateAddressId::kPendingHandlerSPAddress, masm->isolate()); + + // Ask the runtime for help to determine the handler. This will set v0 to + // contain the current pending exception, don't clobber it. + ExternalReference find_handler = + ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(3, 0, a0); + __ mov(a0, zero_reg); + __ mov(a1, zero_reg); + __ li(a2, ExternalReference::isolate_address(masm->isolate())); + __ CallCFunction(find_handler, 3); + } + + // Retrieve the handler context, SP and FP. + __ li(cp, pending_handler_context_address); + __ lw(cp, MemOperand(cp)); + __ li(sp, pending_handler_sp_address); + __ lw(sp, MemOperand(sp)); + __ li(fp, pending_handler_fp_address); + __ lw(fp, MemOperand(fp)); + + // If the handler is a JS frame, restore the context to the frame. Note that + // the context will be set to (cp == 0) for non-JS frames. + Label zero; + __ Branch(&zero, eq, cp, Operand(zero_reg)); + __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ bind(&zero); + + // Clear c_entry_fp, like we do in `LeaveExitFrame`. + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, + masm->isolate())); + __ Sw(zero_reg, MemOperand(scratch)); + } + + // Compute the handler entry address and jump to it. + __ li(t9, pending_handler_entrypoint_address); + __ lw(t9, MemOperand(t9)); + __ Jump(t9); +} + +void Builtins::Generate_DoubleToI(MacroAssembler* masm) { + Label done; + Register result_reg = t0; + + Register scratch = GetRegisterThatIsNotOneOf(result_reg); + Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch); + Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2); + DoubleRegister double_scratch = kScratchDoubleReg; + + // Account for saved regs. 
+ const int kArgumentOffset = 4 * kPointerSize; + + __ Push(result_reg); + __ Push(scratch, scratch2, scratch3); + + // Load double input. + __ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset)); + + // Try a conversion to a signed integer. + __ Trunc_w_d(double_scratch, double_scratch); + // Move the converted value into the result register. + __ mfc1(scratch3, double_scratch); + + // Retrieve the FCSR. + __ cfc1(scratch, FCSR); + + // Check for overflow and NaNs. + __ And(scratch, scratch, + kFCSROverflowCauseMask | kFCSRUnderflowCauseMask | + kFCSRInvalidOpCauseMask); + // If we had no exceptions then set result_reg and we are done. + Label error; + __ Branch(&error, ne, scratch, Operand(zero_reg)); + __ Move(result_reg, scratch3); + __ Branch(&done); + __ bind(&error); + + // Load the double value and perform a manual truncation. + Register input_high = scratch2; + Register input_low = scratch3; + + __ lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset)); + __ lw(input_high, + MemOperand(sp, kArgumentOffset + Register::kExponentOffset)); + + Label normal_exponent; + // Extract the biased exponent in result. + __ Ext(result_reg, input_high, HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + + // Check for Infinity and NaNs, which should return 0. + __ Subu(scratch, result_reg, HeapNumber::kExponentMask); + __ Movz(result_reg, zero_reg, scratch); + __ Branch(&done, eq, scratch, Operand(zero_reg)); + + // Express exponent as delta to (number of mantissa bits + 31). + __ Subu(result_reg, result_reg, + Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31)); + + // If the delta is strictly positive, all bits would be shifted away, + // which means that we can return 0. + __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg)); + __ mov(result_reg, zero_reg); + __ Branch(&done); + + __ bind(&normal_exponent); + const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; + // Calculate shift. + __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits)); + + // Save the sign. + Register sign = result_reg; + result_reg = no_reg; + __ And(sign, input_high, Operand(HeapNumber::kSignMask)); + + // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need + // to check for this specific case. + Label high_shift_needed, high_shift_done; + __ Branch(&high_shift_needed, lt, scratch, Operand(32)); + __ mov(input_high, zero_reg); + __ Branch(&high_shift_done); + __ bind(&high_shift_needed); + + // Set the implicit 1 before the mantissa part in input_high. + __ Or(input_high, input_high, + Operand(1 << HeapNumber::kMantissaBitsInTopWord)); + // Shift the mantissa bits to the correct position. + // We don't need to clear non-mantissa bits as they will be shifted away. + // If they weren't, it would mean that the answer is in the 32bit range. + __ sllv(input_high, input_high, scratch); + + __ bind(&high_shift_done); + + // Replace the shifted bits with bits from the lower mantissa word. + Label pos_shift, shift_done; + __ li(kScratchReg, 32); + __ subu(scratch, kScratchReg, scratch); + __ Branch(&pos_shift, ge, scratch, Operand(zero_reg)); + + // Negate scratch. + __ Subu(scratch, zero_reg, scratch); + __ sllv(input_low, input_low, scratch); + __ Branch(&shift_done); + + __ bind(&pos_shift); + __ srlv(input_low, input_low, scratch); + + __ bind(&shift_done); + __ Or(input_high, input_high, Operand(input_low)); + // Restore sign if necessary. 
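The lines that follow restore the sign and store the result back to the stack slot. For reference, here is a self-contained C++ sketch of the same idea: truncating a double to a 32-bit integer purely by exponent/mantissa bit manipulation, which is what this fallback does when the FPU conversion above reports overflow, underflow or an invalid operation. It follows ECMAScript ToInt32 semantics; the names are invented and the builtin's exact shift bookkeeping differs.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative only, not V8 code: ECMAScript-style ToInt32 implemented with
// integer operations on the raw IEEE-754 bits.
int32_t ToInt32Bitwise(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));

  const bool negative = (bits >> 63) != 0;
  const int exponent = static_cast<int>((bits >> 52) & 0x7FF);
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);

  // NaN, Infinity and values with magnitude below 1 all map to 0.
  if (exponent == 0x7FF || exponent < 1023) return 0;
  mantissa |= uint64_t{1} << 52;  // Restore the implicit leading 1.

  // The magnitude is mantissa * 2^(exponent - 1023 - 52); keep its low 32 bits.
  const int shift = exponent - 1023 - 52;
  uint32_t low32;
  if (shift >= 32) {
    low32 = 0;  // Every mantissa bit lands above bit 31.
  } else if (shift >= 0) {
    low32 = static_cast<uint32_t>(mantissa << shift);   // Wraps modulo 2^32.
  } else {
    low32 = static_cast<uint32_t>(mantissa >> -shift);  // Drop fraction bits.
  }
  if (negative) low32 = 0u - low32;  // Negate modulo 2^32.
  return static_cast<int32_t>(low32);
}

int main() {
  std::printf("%d\n", ToInt32Bitwise(3.9));           // 3
  std::printf("%d\n", ToInt32Bitwise(-3.9));          // -3
  std::printf("%d\n", ToInt32Bitwise(4294967298.0));  // 2 (wraps modulo 2^32)
}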
+ __ mov(scratch, sign); + result_reg = sign; + sign = no_reg; + __ Subu(result_reg, zero_reg, input_high); + __ Movz(result_reg, input_high, scratch); + + __ bind(&done); + __ sw(result_reg, MemOperand(sp, kArgumentOffset)); + __ Pop(scratch, scratch2, scratch3); + __ Pop(result_reg); + __ Ret(); +} + +namespace { + +int AddressOffset(ExternalReference ref0, ExternalReference ref1) { + return ref0.address() - ref1.address(); +} + +// Calls an API function. Allocates HandleScope, extracts returned value +// from handle and propagates exceptions. Restores context. stack_space +// - space to be unwound on exit (includes the call JS arguments space and +// the additional space allocated for the fast call). +void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, + ExternalReference thunk_ref, int stack_space, + MemOperand* stack_space_operand, + MemOperand return_value_operand) { + ASM_CODE_COMMENT(masm); + Isolate* isolate = masm->isolate(); + ExternalReference next_address = + ExternalReference::handle_scope_next_address(isolate); + const int kNextOffset = 0; + const int kLimitOffset = AddressOffset( + ExternalReference::handle_scope_limit_address(isolate), next_address); + const int kLevelOffset = AddressOffset( + ExternalReference::handle_scope_level_address(isolate), next_address); + + DCHECK(function_address == a1 || function_address == a2); + + Label profiler_enabled, end_profiler_check; + __ li(t9, ExternalReference::is_profiling_address(isolate)); + __ lb(t9, MemOperand(t9, 0)); + __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); + __ li(t9, ExternalReference::address_of_runtime_stats_flag()); + __ lw(t9, MemOperand(t9, 0)); + __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); + { + // Call the api function directly. + __ mov(t9, function_address); + __ Branch(&end_profiler_check); + } + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ li(t9, thunk_ref); + } + __ bind(&end_profiler_check); + + // Allocate HandleScope in callee-save registers. + __ li(s5, next_address); + __ lw(s0, MemOperand(s5, kNextOffset)); + __ lw(s1, MemOperand(s5, kLimitOffset)); + __ lw(s2, MemOperand(s5, kLevelOffset)); + __ Addu(s2, s2, Operand(1)); + __ sw(s2, MemOperand(s5, kLevelOffset)); + + __ StoreReturnAddressAndCall(t9); + + Label promote_scheduled_exception; + Label delete_allocated_handles; + Label leave_exit_frame; + Label return_value_loaded; + + // Load value from ReturnValue. + __ lw(v0, return_value_operand); + __ bind(&return_value_loaded); + + // No more valid handles (the result handle was the last one). Restore + // previous handle scope. + __ sw(s0, MemOperand(s5, kNextOffset)); + if (FLAG_debug_code) { + __ lw(a1, MemOperand(s5, kLevelOffset)); + __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1, + Operand(s2)); + } + __ Subu(s2, s2, Operand(1)); + __ sw(s2, MemOperand(s5, kLevelOffset)); + __ lw(kScratchReg, MemOperand(s5, kLimitOffset)); + __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg)); + + // Leave the API exit frame. + __ bind(&leave_exit_frame); + + if (stack_space_operand == nullptr) { + DCHECK_NE(stack_space, 0); + __ li(s0, Operand(stack_space)); + } else { + DCHECK_EQ(stack_space, 0); + // The ExitFrame contains four MIPS argument slots after the call so this + // must be accounted for. + // TODO(jgruber): Investigate if this is needed by the direct call. 
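Returning to the handle-scope bookkeeping earlier in this function: the code saves the isolate's next/limit/level fields, bumps the level before the call, then restores them afterwards and deletes any extensions the callback allocated whenever the limit has changed. A conceptual C++ model of that protocol, using plain illustrative structs rather than v8::HandleScope or V8's internal types; the Drop that follows below then accounts for the four MIPS argument slots mentioned above:

// Conceptual model of the handle-scope save/restore above; illustrative types
// only, not V8's HandleScopeData.
struct HandleScopeDataModel {
  void** next = nullptr;   // Next free handle slot.
  void** limit = nullptr;  // End of the currently allocated block.
  int level = 0;           // Nesting depth.
};

template <typename Callback>
void* CallWithHandleScope(HandleScopeDataModel* data, Callback api_call) {
  // Save the state the builtin keeps in s0/s1/s2, then open a new scope.
  void** const saved_next = data->next;
  void** const saved_limit = data->limit;
  ++data->level;

  void* result = api_call();  // The actual API callback invocation.

  // Close the scope: restore next, drop the level, and if the callback grew
  // the handle area (limit changed) the real code calls DeleteExtensions().
  data->next = saved_next;
  --data->level;
  if (data->limit != saved_limit) {
    data->limit = saved_limit;  // Stand-in for deleting the extra blocks.
  }
  return result;
}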
+ __ Drop(kCArgSlotCount); + __ lw(s0, *stack_space_operand); + } + + static constexpr bool kDontSaveDoubles = false; + static constexpr bool kRegisterContainsSlotCount = false; + __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN, + kRegisterContainsSlotCount); + + // Check if the function scheduled an exception. + __ LoadRoot(t0, RootIndex::kTheHoleValue); + __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate)); + __ lw(t1, MemOperand(kScratchReg)); + __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1)); + + __ Ret(); + + // Re-throw by promoting a scheduled exception. + __ bind(&promote_scheduled_exception); + __ TailCallRuntime(Runtime::kPromoteScheduledException); + + // HandleScope limit has changed. Delete allocated extensions. + __ bind(&delete_allocated_handles); + __ sw(s1, MemOperand(s5, kLimitOffset)); + __ mov(s0, v0); + __ mov(a0, v0); + __ PrepareCallCFunction(1, s1); + __ li(a0, ExternalReference::isolate_address(isolate)); + __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1); + __ mov(v0, s0); + __ jmp(&leave_exit_frame); +} + +} // namespace + +void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- cp : context + // -- a1 : api function address + // -- a2 : arguments count + // -- a3 : call data + // -- a0 : holder + // -- sp[0] : receiver + // -- sp[8] : first argument + // -- ... + // -- sp[(argc) * 8] : last argument + // ----------------------------------- + + Register api_function_address = a1; + Register argc = a2; + Register call_data = a3; + Register holder = a0; + Register scratch = t0; + Register base = t1; // For addressing MemOperands on the stack. + + DCHECK(!AreAliased(api_function_address, argc, call_data, + holder, scratch, base)); + + using FCA = FunctionCallbackArguments; + + STATIC_ASSERT(FCA::kArgsLength == 6); + STATIC_ASSERT(FCA::kNewTargetIndex == 5); + STATIC_ASSERT(FCA::kDataIndex == 4); + STATIC_ASSERT(FCA::kReturnValueOffset == 3); + STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); + STATIC_ASSERT(FCA::kIsolateIndex == 1); + STATIC_ASSERT(FCA::kHolderIndex == 0); + + // Set up FunctionCallbackInfo's implicit_args on the stack as follows: + // + // Target state: + // sp[0 * kPointerSize]: kHolder + // sp[1 * kPointerSize]: kIsolate + // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue) + // sp[3 * kPointerSize]: undefined (kReturnValue) + // sp[4 * kPointerSize]: kData + // sp[5 * kPointerSize]: undefined (kNewTarget) + + // Set up the base register for addressing through MemOperands. It will point + // at the receiver (located at sp + argc * kPointerSize). + __ Lsa(base, sp, argc, kPointerSizeLog2); + + // Reserve space on the stack. + __ Subu(sp, sp, Operand(FCA::kArgsLength * kPointerSize)); + + // kHolder. + __ sw(holder, MemOperand(sp, 0 * kPointerSize)); + + // kIsolate. + __ li(scratch, ExternalReference::isolate_address(masm->isolate())); + __ sw(scratch, MemOperand(sp, 1 * kPointerSize)); + + // kReturnValueDefaultValue and kReturnValue. + __ LoadRoot(scratch, RootIndex::kUndefinedValue); + __ sw(scratch, MemOperand(sp, 2 * kPointerSize)); + __ sw(scratch, MemOperand(sp, 3 * kPointerSize)); + + // kData. + __ sw(call_data, MemOperand(sp, 4 * kPointerSize)); + + // kNewTarget. + __ sw(scratch, MemOperand(sp, 5 * kPointerSize)); + + // Keep a pointer to kHolder (= implicit_args) in a scratch register. + // We use it below to set up the FunctionCallbackInfo object. 
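The six stores above fill FunctionCallbackInfo's implicit_args block in index order, and the mov that follows captures sp, which now points at kHolder, precisely so it can be recorded as implicit_args_. The same layout expressed as ordinary C++, with stand-in value types and indices mirroring the STATIC_ASSERTs:

#include <array>
#include <cstdint>

// Stand-in values; in the builtin these are tagged pointers on the stack.
using SlotValue = uintptr_t;

// Indices mirror FunctionCallbackArguments (FCA) as asserted above.
constexpr int kHolderIndex = 0;
constexpr int kIsolateIndex = 1;
constexpr int kReturnValueDefaultValueIndex = 2;
constexpr int kReturnValueIndex = 3;
constexpr int kDataIndex = 4;
constexpr int kNewTargetIndex = 5;
constexpr int kArgsLength = 6;

std::array<SlotValue, kArgsLength> BuildImplicitArgs(SlotValue holder,
                                                     SlotValue isolate,
                                                     SlotValue undefined_value,
                                                     SlotValue call_data) {
  std::array<SlotValue, kArgsLength> slots{};
  slots[kHolderIndex] = holder;
  slots[kIsolateIndex] = isolate;
  slots[kReturnValueDefaultValueIndex] = undefined_value;
  slots[kReturnValueIndex] = undefined_value;
  slots[kDataIndex] = call_data;
  slots[kNewTargetIndex] = undefined_value;  // kNewTarget starts as undefined.
  return slots;
}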
+ __ mov(scratch, sp); + + // Allocate the v8::Arguments structure in the arguments' space since + // it's not controlled by GC. + static constexpr int kApiStackSpace = 4; + static constexpr bool kDontSaveDoubles = false; + FrameScope frame_scope(masm, StackFrame::MANUAL); + __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); + + // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). + // Arguments are after the return address (pushed by EnterExitFrame()). + __ sw(scratch, MemOperand(sp, 1 * kPointerSize)); + + // FunctionCallbackInfo::values_ (points at the first varargs argument passed + // on the stack). + __ Addu(scratch, scratch, + Operand((FCA::kArgsLength + 1) * kSystemPointerSize)); + __ sw(scratch, MemOperand(sp, 2 * kPointerSize)); + + // FunctionCallbackInfo::length_. + __ sw(argc, MemOperand(sp, 3 * kPointerSize)); + + // We also store the number of bytes to drop from the stack after returning + // from the API function here. + // Note: Unlike on other architectures, this stores the number of slots to + // drop, not the number of bytes. + __ Addu(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */)); + __ sw(scratch, MemOperand(sp, 4 * kPointerSize)); + + // v8::InvocationCallback's argument. + DCHECK(!AreAliased(api_function_address, scratch, a0)); + __ Addu(a0, sp, Operand(1 * kPointerSize)); + + ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); + + // There are two stack slots above the arguments we constructed on the stack. + // TODO(jgruber): Document what these arguments are. + static constexpr int kStackSlotsAboveFCA = 2; + MemOperand return_value_operand( + fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize); + + static constexpr int kUseStackSpaceOperand = 0; + MemOperand stack_space_operand(sp, 4 * kPointerSize); + + AllowExternalCallThatCantCauseGC scope(masm); + CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, + kUseStackSpaceOperand, &stack_space_operand, + return_value_operand); +} + +void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { + // Build v8::PropertyCallbackInfo::args_ array on the stack and push property + // name below the exit frame to make GC aware of them. + STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0); + STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1); + STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4); + STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5); + STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6); + STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7); + + Register receiver = ApiGetterDescriptor::ReceiverRegister(); + Register holder = ApiGetterDescriptor::HolderRegister(); + Register callback = ApiGetterDescriptor::CallbackRegister(); + Register scratch = t0; + DCHECK(!AreAliased(receiver, holder, callback, scratch)); + + Register api_function_address = a2; + + // Here and below +1 is for name() pushed after the args_ array. 
+ using PCA = PropertyCallbackArguments; + __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize); + __ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize)); + __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset)); + __ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize)); + __ LoadRoot(scratch, RootIndex::kUndefinedValue); + __ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize)); + __ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) * + kPointerSize)); + __ li(scratch, ExternalReference::isolate_address(masm->isolate())); + __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize)); + __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize)); + // should_throw_on_error -> false + DCHECK_EQ(0, Smi::zero().ptr()); + __ sw(zero_reg, + MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize)); + __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset)); + __ sw(scratch, MemOperand(sp, 0 * kPointerSize)); + + // v8::PropertyCallbackInfo::args_ array and name handle. + const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; + + // Load address of v8::PropertyAccessorInfo::args_ array and name handle. + __ mov(a0, sp); // a0 = Handle + __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_ + + const int kApiStackSpace = 1; + FrameScope frame_scope(masm, StackFrame::MANUAL); + __ EnterExitFrame(false, kApiStackSpace); + + // Create v8::PropertyCallbackInfo object on the stack and initialize + // it's args_ field. + __ sw(a1, MemOperand(sp, 1 * kPointerSize)); + __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = v8::PropertyCallbackInfo& + + ExternalReference thunk_ref = + ExternalReference::invoke_accessor_getter_callback(); + + __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset)); + __ lw(api_function_address, + FieldMemOperand(scratch, Foreign::kForeignAddressOffset)); + + // +3 is to skip prolog, return address and name handle. + MemOperand return_value_operand( + fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); + MemOperand* const kUseStackSpaceConstant = nullptr; + CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, + kStackUnwindSpace, kUseStackSpaceConstant, + return_value_operand); +} + +void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { + // The sole purpose of DirectCEntry is for movable callers (e.g. any general + // purpose Code object) to be able to call into C functions that may trigger + // GC and thus move the caller. + // + // DirectCEntry places the return address on the stack (updated by the GC), + // making the call GC safe. The irregexp backend relies on this. + + // Make place for arguments to fit C calling convention. Callers use + // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't + // have to do that here. Any caller must drop kCArgsSlotsSize stack space + // after the call. + __ Subu(sp, sp, Operand(kCArgsSlotsSize)); + + __ sw(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address. + __ Call(t9); // Call the C++ function. + __ lw(t9, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code. + + if (FLAG_debug_code && FLAG_enable_slow_asserts) { + // In case of an error the return address may point to a memory area + // filled with kZapValue by the GC. Dereference the address and check for + // this. 
+ __ lw(t0, MemOperand(t9)); + __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, t0, + Operand(reinterpret_cast(kZapValue))); + } + + __ Jump(t9); +} + +void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) { + // This code assumes that cache lines are 32 bytes and if the cache line is + // larger it will not work correctly. + { + Label lastb, unaligned, aligned, chkw, loop16w, chk1w, wordCopy_loop, + skip_pref, lastbloop, leave, ua_chk16w, ua_loop16w, ua_skip_pref, + ua_chkw, ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop; + + // The size of each prefetch. + uint32_t pref_chunk = 32; + // The maximum size of a prefetch, it must not be less than pref_chunk. + // If the real size of a prefetch is greater than max_pref_size and + // the kPrefHintPrepareForStore hint is used, the code will not work + // correctly. + uint32_t max_pref_size = 128; + DCHECK(pref_chunk < max_pref_size); + + // pref_limit is set based on the fact that we never use an offset + // greater then 5 on a store pref and that a single pref can + // never be larger then max_pref_size. + uint32_t pref_limit = (5 * pref_chunk) + max_pref_size; + int32_t pref_hint_load = kPrefHintLoadStreamed; + int32_t pref_hint_store = kPrefHintPrepareForStore; + uint32_t loadstore_chunk = 4; + + // The initial prefetches may fetch bytes that are before the buffer being + // copied. Start copies with an offset of 4 so avoid this situation when + // using kPrefHintPrepareForStore. + DCHECK(pref_hint_store != kPrefHintPrepareForStore || + pref_chunk * 4 >= max_pref_size); + + // If the size is less than 8, go to lastb. Regardless of size, + // copy dst pointer to v0 for the retuen value. + __ slti(t2, a2, 2 * loadstore_chunk); + __ bne(t2, zero_reg, &lastb); + __ mov(v0, a0); // In delay slot. + + // If src and dst have different alignments, go to unaligned, if they + // have the same alignment (but are not actually aligned) do a partial + // load/store to make them aligned. If they are both already aligned + // we can start copying at aligned. + __ xor_(t8, a1, a0); + __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement. + __ bne(t8, zero_reg, &unaligned); + __ subu(a3, zero_reg, a0); // In delay slot. + + __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1. + __ beq(a3, zero_reg, &aligned); // Already aligned. + __ subu(a2, a2, a3); // In delay slot. a2 is the remining bytes count. + + if (kArchEndian == kLittle) { + __ lwr(t8, MemOperand(a1)); + __ addu(a1, a1, a3); + __ swr(t8, MemOperand(a0)); + __ addu(a0, a0, a3); + } else { + __ lwl(t8, MemOperand(a1)); + __ addu(a1, a1, a3); + __ swl(t8, MemOperand(a0)); + __ addu(a0, a0, a3); + } + // Now dst/src are both aligned to (word) aligned addresses. Set a2 to + // count how many bytes we have to copy after all the 64 byte chunks are + // copied and a3 to the dst pointer after all the 64 byte chunks have been + // copied. We will loop, incrementing a0 and a1 until a0 equals a3. + __ bind(&aligned); + __ andi(t8, a2, 0x3F); + __ beq(a2, t8, &chkw); // Less than 64? + __ subu(a3, a2, t8); // In delay slot. + __ addu(a3, a0, a3); // Now a3 is the final dst after loop. + + // When in the loop we prefetch with kPrefHintPrepareForStore hint, + // in this case the a0+x should be past the "t0-32" address. This means: + // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for + // x=64 the last "safe" a0 address is "t0-96". In the current version we + // will use "pref hint, 128(a0)", so "t0-160" is the limit. 
+ if (pref_hint_store == kPrefHintPrepareForStore) { + __ addu(t0, a0, a2); // t0 is the "past the end" address. + __ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address. + } + + __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk)); + + if (pref_hint_store != kPrefHintPrepareForStore) { + __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk)); + } + __ bind(&loop16w); + __ lw(t0, MemOperand(a1)); + + if (pref_hint_store == kPrefHintPrepareForStore) { + __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch. + __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg)); + } + __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot. + + __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); + + __ bind(&skip_pref); + __ lw(t2, MemOperand(a1, 2, loadstore_chunk)); + __ lw(t3, MemOperand(a1, 3, loadstore_chunk)); + __ lw(t4, MemOperand(a1, 4, loadstore_chunk)); + __ lw(t5, MemOperand(a1, 5, loadstore_chunk)); + __ lw(t6, MemOperand(a1, 6, loadstore_chunk)); + __ lw(t7, MemOperand(a1, 7, loadstore_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); + + __ sw(t0, MemOperand(a0)); + __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 2, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 3, loadstore_chunk)); + __ sw(t4, MemOperand(a0, 4, loadstore_chunk)); + __ sw(t5, MemOperand(a0, 5, loadstore_chunk)); + __ sw(t6, MemOperand(a0, 6, loadstore_chunk)); + __ sw(t7, MemOperand(a0, 7, loadstore_chunk)); + + __ lw(t0, MemOperand(a1, 8, loadstore_chunk)); + __ lw(t1, MemOperand(a1, 9, loadstore_chunk)); + __ lw(t2, MemOperand(a1, 10, loadstore_chunk)); + __ lw(t3, MemOperand(a1, 11, loadstore_chunk)); + __ lw(t4, MemOperand(a1, 12, loadstore_chunk)); + __ lw(t5, MemOperand(a1, 13, loadstore_chunk)); + __ lw(t6, MemOperand(a1, 14, loadstore_chunk)); + __ lw(t7, MemOperand(a1, 15, loadstore_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); + + __ sw(t0, MemOperand(a0, 8, loadstore_chunk)); + __ sw(t1, MemOperand(a0, 9, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 10, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 11, loadstore_chunk)); + __ sw(t4, MemOperand(a0, 12, loadstore_chunk)); + __ sw(t5, MemOperand(a0, 13, loadstore_chunk)); + __ sw(t6, MemOperand(a0, 14, loadstore_chunk)); + __ sw(t7, MemOperand(a0, 15, loadstore_chunk)); + __ addiu(a0, a0, 16 * loadstore_chunk); + __ bne(a0, a3, &loop16w); + __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot. + __ mov(a2, t8); + + // Here we have src and dest word-aligned but less than 64-bytes to go. + // Check for a 32 bytes chunk and copy if there is one. Otherwise jump + // down to chk1w to handle the tail end of the copy. + __ bind(&chkw); + __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); + __ andi(t8, a2, 0x1F); + __ beq(a2, t8, &chk1w); // Less than 32? + __ nop(); // In delay slot. 
+ __ lw(t0, MemOperand(a1)); + __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); + __ lw(t2, MemOperand(a1, 2, loadstore_chunk)); + __ lw(t3, MemOperand(a1, 3, loadstore_chunk)); + __ lw(t4, MemOperand(a1, 4, loadstore_chunk)); + __ lw(t5, MemOperand(a1, 5, loadstore_chunk)); + __ lw(t6, MemOperand(a1, 6, loadstore_chunk)); + __ lw(t7, MemOperand(a1, 7, loadstore_chunk)); + __ addiu(a1, a1, 8 * loadstore_chunk); + __ sw(t0, MemOperand(a0)); + __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 2, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 3, loadstore_chunk)); + __ sw(t4, MemOperand(a0, 4, loadstore_chunk)); + __ sw(t5, MemOperand(a0, 5, loadstore_chunk)); + __ sw(t6, MemOperand(a0, 6, loadstore_chunk)); + __ sw(t7, MemOperand(a0, 7, loadstore_chunk)); + __ addiu(a0, a0, 8 * loadstore_chunk); + + // Here we have less than 32 bytes to copy. Set up for a loop to copy + // one word at a time. Set a2 to count how many bytes we have to copy + // after all the word chunks are copied and a3 to the dst pointer after + // all the word chunks have been copied. We will loop, incrementing a0 + // and a1 until a0 equals a3. + __ bind(&chk1w); + __ andi(a2, t8, loadstore_chunk - 1); + __ beq(a2, t8, &lastb); + __ subu(a3, t8, a2); // In delay slot. + __ addu(a3, a0, a3); + + __ bind(&wordCopy_loop); + __ lw(t3, MemOperand(a1)); + __ addiu(a0, a0, loadstore_chunk); + __ addiu(a1, a1, loadstore_chunk); + __ bne(a0, a3, &wordCopy_loop); + __ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot. + + __ bind(&lastb); + __ Branch(&leave, le, a2, Operand(zero_reg)); + __ addu(a3, a0, a2); + + __ bind(&lastbloop); + __ lb(v1, MemOperand(a1)); + __ addiu(a0, a0, 1); + __ addiu(a1, a1, 1); + __ bne(a0, a3, &lastbloop); + __ sb(v1, MemOperand(a0, -1)); // In delay slot. + + __ bind(&leave); + __ jr(ra); + __ nop(); + + // Unaligned case. Only the dst gets aligned so we need to do partial + // loads of the source followed by normal stores to the dst (once we + // have aligned the destination). + __ bind(&unaligned); + __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1. + __ beq(a3, zero_reg, &ua_chk16w); + __ subu(a2, a2, a3); // In delay slot. + + if (kArchEndian == kLittle) { + __ lwr(v1, MemOperand(a1)); + __ lwl(v1, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ addu(a1, a1, a3); + __ swr(v1, MemOperand(a0)); + __ addu(a0, a0, a3); + } else { + __ lwl(v1, MemOperand(a1)); + __ lwr(v1, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ addu(a1, a1, a3); + __ swl(v1, MemOperand(a0)); + __ addu(a0, a0, a3); + } + + // Now the dst (but not the source) is aligned. Set a2 to count how many + // bytes we have to copy after all the 64 byte chunks are copied and a3 to + // the dst pointer after all the 64 byte chunks have been copied. We will + // loop, incrementing a0 and a1 until a0 equals a3. + __ bind(&ua_chk16w); + __ andi(t8, a2, 0x3F); + __ beq(a2, t8, &ua_chkw); + __ subu(a3, a2, t8); // In delay slot. 
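Both the aligned path above and the unaligned variant that continues below follow the same overall strategy: fix up alignment with a small head copy, move 64-byte (16-word) chunks, then single words, then a byte tail. A portable C++ sketch of that strategy, without the prefetch hints and delay-slot scheduling, and not the implementation V8 actually uses:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative only. std::memcpy stands in for the word loads/stores; the
// real builtin also prefetches ahead of both pointers.
void* CopyLikeAbove(void* dst, const void* src, size_t n) {
  auto* d = static_cast<uint8_t*>(dst);
  const auto* s = static_cast<const uint8_t*>(src);
  void* const ret = dst;  // Mirrors copying the dst pointer to v0 for return.

  if (n >= 8 && ((reinterpret_cast<uintptr_t>(d) ^
                  reinterpret_cast<uintptr_t>(s)) & 3) == 0) {
    // Same misalignment: a short byte head makes both pointers word aligned.
    size_t head = (0 - reinterpret_cast<uintptr_t>(d)) & 3;
    n -= head;
    while (head-- > 0) *d++ = *s++;

    // 64-byte (16-word) chunks, then single words.
    while (n >= 64) { std::memcpy(d, s, 64); d += 64; s += 64; n -= 64; }
    while (n >= 4)  { std::memcpy(d, s, 4);  d += 4;  s += 4;  n -= 4;  }
  }
  // Byte tail; also handles small or differently aligned inputs.
  while (n-- > 0) *d++ = *s++;
  return ret;
}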
+ __ addu(a3, a0, a3); + + if (pref_hint_store == kPrefHintPrepareForStore) { + __ addu(t0, a0, a2); + __ Subu(t9, t0, pref_limit); + } + + __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk)); + __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk)); + + if (pref_hint_store != kPrefHintPrepareForStore) { + __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk)); + } + + __ bind(&ua_loop16w); + __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk)); + if (kArchEndian == kLittle) { + __ lwr(t0, MemOperand(a1)); + __ lwr(t1, MemOperand(a1, 1, loadstore_chunk)); + __ lwr(t2, MemOperand(a1, 2, loadstore_chunk)); + + if (pref_hint_store == kPrefHintPrepareForStore) { + __ sltu(v1, t9, a0); + __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg)); + } + __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot. + + __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); + + __ bind(&ua_skip_pref); + __ lwr(t4, MemOperand(a1, 4, loadstore_chunk)); + __ lwr(t5, MemOperand(a1, 5, loadstore_chunk)); + __ lwr(t6, MemOperand(a1, 6, loadstore_chunk)); + __ lwr(t7, MemOperand(a1, 7, loadstore_chunk)); + __ lwl(t0, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t1, + MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t2, + MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t3, + MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t4, + MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t5, + MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t6, + MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t7, + MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); + } else { + __ lwl(t0, MemOperand(a1)); + __ lwl(t1, MemOperand(a1, 1, loadstore_chunk)); + __ lwl(t2, MemOperand(a1, 2, loadstore_chunk)); + + if (pref_hint_store == kPrefHintPrepareForStore) { + __ sltu(v1, t9, a0); + __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg)); + } + __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot. 
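Each lwr/lwl pair in this loop assembles one register-width word from an unaligned source address, one partial load per half. In portable C++ the same single step is usually written as a small memcpy, which compilers lower to an unaligned load where the target allows it; this is only an illustration, not how the builtin itself is structured:

#include <cstdint>
#include <cstring>

// Load one 32-bit word from a possibly unaligned address (stands in for the
// lwr/lwl pair used above).
uint32_t LoadWordUnaligned(const void* src) {
  uint32_t word;
  std::memcpy(&word, src, sizeof(word));
  return word;
}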
+ + __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); + __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); + + __ bind(&ua_skip_pref); + __ lwl(t4, MemOperand(a1, 4, loadstore_chunk)); + __ lwl(t5, MemOperand(a1, 5, loadstore_chunk)); + __ lwl(t6, MemOperand(a1, 6, loadstore_chunk)); + __ lwl(t7, MemOperand(a1, 7, loadstore_chunk)); + __ lwr(t0, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t1, + MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t2, + MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t3, + MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t4, + MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t5, + MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t6, + MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t7, + MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); + } + __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); + __ sw(t0, MemOperand(a0)); + __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 2, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 3, loadstore_chunk)); + __ sw(t4, MemOperand(a0, 4, loadstore_chunk)); + __ sw(t5, MemOperand(a0, 5, loadstore_chunk)); + __ sw(t6, MemOperand(a0, 6, loadstore_chunk)); + __ sw(t7, MemOperand(a0, 7, loadstore_chunk)); + if (kArchEndian == kLittle) { + __ lwr(t0, MemOperand(a1, 8, loadstore_chunk)); + __ lwr(t1, MemOperand(a1, 9, loadstore_chunk)); + __ lwr(t2, MemOperand(a1, 10, loadstore_chunk)); + __ lwr(t3, MemOperand(a1, 11, loadstore_chunk)); + __ lwr(t4, MemOperand(a1, 12, loadstore_chunk)); + __ lwr(t5, MemOperand(a1, 13, loadstore_chunk)); + __ lwr(t6, MemOperand(a1, 14, loadstore_chunk)); + __ lwr(t7, MemOperand(a1, 15, loadstore_chunk)); + __ lwl(t0, + MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t1, + MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t2, + MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t3, + MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t4, + MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t5, + MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t6, + MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t7, + MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one)); + } else { + __ lwl(t0, MemOperand(a1, 8, loadstore_chunk)); + __ lwl(t1, MemOperand(a1, 9, loadstore_chunk)); + __ lwl(t2, MemOperand(a1, 10, loadstore_chunk)); + __ lwl(t3, MemOperand(a1, 11, loadstore_chunk)); + __ lwl(t4, MemOperand(a1, 12, loadstore_chunk)); + __ lwl(t5, MemOperand(a1, 13, loadstore_chunk)); + __ lwl(t6, MemOperand(a1, 14, loadstore_chunk)); + __ lwl(t7, MemOperand(a1, 15, loadstore_chunk)); + __ lwr(t0, + MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t1, + MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t2, + MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t3, + MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t4, + MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t5, + MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t6, + MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t7, + 
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one)); + } + __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); + __ sw(t0, MemOperand(a0, 8, loadstore_chunk)); + __ sw(t1, MemOperand(a0, 9, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 10, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 11, loadstore_chunk)); + __ sw(t4, MemOperand(a0, 12, loadstore_chunk)); + __ sw(t5, MemOperand(a0, 13, loadstore_chunk)); + __ sw(t6, MemOperand(a0, 14, loadstore_chunk)); + __ sw(t7, MemOperand(a0, 15, loadstore_chunk)); + __ addiu(a0, a0, 16 * loadstore_chunk); + __ bne(a0, a3, &ua_loop16w); + __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot. + __ mov(a2, t8); + + // Here less than 64-bytes. Check for + // a 32 byte chunk and copy if there is one. Otherwise jump down to + // ua_chk1w to handle the tail end of the copy. + __ bind(&ua_chkw); + __ Pref(pref_hint_load, MemOperand(a1)); + __ andi(t8, a2, 0x1F); + + __ beq(a2, t8, &ua_chk1w); + __ nop(); // In delay slot. + if (kArchEndian == kLittle) { + __ lwr(t0, MemOperand(a1)); + __ lwr(t1, MemOperand(a1, 1, loadstore_chunk)); + __ lwr(t2, MemOperand(a1, 2, loadstore_chunk)); + __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); + __ lwr(t4, MemOperand(a1, 4, loadstore_chunk)); + __ lwr(t5, MemOperand(a1, 5, loadstore_chunk)); + __ lwr(t6, MemOperand(a1, 6, loadstore_chunk)); + __ lwr(t7, MemOperand(a1, 7, loadstore_chunk)); + __ lwl(t0, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t1, + MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t2, + MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t3, + MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t4, + MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t5, + MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t6, + MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwl(t7, + MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); + } else { + __ lwl(t0, MemOperand(a1)); + __ lwl(t1, MemOperand(a1, 1, loadstore_chunk)); + __ lwl(t2, MemOperand(a1, 2, loadstore_chunk)); + __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); + __ lwl(t4, MemOperand(a1, 4, loadstore_chunk)); + __ lwl(t5, MemOperand(a1, 5, loadstore_chunk)); + __ lwl(t6, MemOperand(a1, 6, loadstore_chunk)); + __ lwl(t7, MemOperand(a1, 7, loadstore_chunk)); + __ lwr(t0, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t1, + MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t2, + MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t3, + MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t4, + MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t5, + MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t6, + MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); + __ lwr(t7, + MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); + } + __ addiu(a1, a1, 8 * loadstore_chunk); + __ sw(t0, MemOperand(a0)); + __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); + __ sw(t2, MemOperand(a0, 2, loadstore_chunk)); + __ sw(t3, MemOperand(a0, 3, loadstore_chunk)); + __ sw(t4, MemOperand(a0, 4, loadstore_chunk)); + __ sw(t5, MemOperand(a0, 5, loadstore_chunk)); + __ sw(t6, MemOperand(a0, 6, loadstore_chunk)); + __ sw(t7, MemOperand(a0, 7, loadstore_chunk)); + __ addiu(a0, a0, 8 * 
loadstore_chunk); + + // Less than 32 bytes to copy. Set up for a loop to + // copy one word at a time. + __ bind(&ua_chk1w); + __ andi(a2, t8, loadstore_chunk - 1); + __ beq(a2, t8, &ua_smallCopy); + __ subu(a3, t8, a2); // In delay slot. + __ addu(a3, a0, a3); + + __ bind(&ua_wordCopy_loop); + if (kArchEndian == kLittle) { + __ lwr(v1, MemOperand(a1)); + __ lwl(v1, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + } else { + __ lwl(v1, MemOperand(a1)); + __ lwr(v1, + MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); + } + __ addiu(a0, a0, loadstore_chunk); + __ addiu(a1, a1, loadstore_chunk); + __ bne(a0, a3, &ua_wordCopy_loop); + __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot. + + // Copy the last 8 bytes. + __ bind(&ua_smallCopy); + __ beq(a2, zero_reg, &leave); + __ addu(a3, a0, a2); // In delay slot. + + __ bind(&ua_smallCopy_loop); + __ lb(v1, MemOperand(a1)); + __ addiu(a0, a0, 1); + __ addiu(a1, a1, 1); + __ bne(a0, a3, &ua_smallCopy_loop); + __ sb(v1, MemOperand(a0, -1)); // In delay slot. + + __ jr(ra); + __ nop(); + } +} + +namespace { + +// This code tries to be close to ia32 code so that any changes can be +// easily ported. +void Generate_DeoptimizationEntry(MacroAssembler* masm, + DeoptimizeKind deopt_kind) { + Isolate* isolate = masm->isolate(); + + // Unlike on ARM we don't save all the registers, just the useful ones. + // For the rest, there are gaps on the stack, so the offsets remain the same. + static constexpr int kNumberOfRegisters = Register::kNumRegisters; + + RegList restored_regs = kJSCallerSaved | kCalleeSaved; + RegList saved_regs = restored_regs | sp | ra; + + static constexpr int kDoubleRegsSize = + kDoubleSize * DoubleRegister::kNumRegisters; + + // Save all FPU registers before messing with them. + __ Subu(sp, sp, Operand(kDoubleRegsSize)); + const RegisterConfiguration* config = RegisterConfiguration::Default(); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister fpu_reg = DoubleRegister::from_code(code); + int offset = code * kDoubleSize; + __ Sdc1(fpu_reg, MemOperand(sp, offset)); + } + + // Push saved_regs (needed to populate FrameDescription::registers_). + // Leave gaps for other registers. + __ Subu(sp, sp, kNumberOfRegisters * kPointerSize); + for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { + if ((saved_regs.bits() & (1 << i)) != 0) { + __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i)); + } + } + + __ li(a2, + ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate)); + __ sw(fp, MemOperand(a2)); + + static constexpr int kSavedRegistersAreaSize = + (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; + + // Get the address of the location in the code object (a2) (return + // address for lazy deoptimization) and compute the fp-to-sp delta in + // register a3. + __ mov(a2, ra); + __ Addu(a3, sp, Operand(kSavedRegistersAreaSize)); + __ Subu(a3, fp, a3); + + // Allocate a new deoptimizer object. + __ PrepareCallCFunction(5, t0); + // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack. + __ mov(a0, zero_reg); + Label context_check; + __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ JumpIfSmi(a1, &context_check); + __ lw(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + __ bind(&context_check); + __ li(a1, Operand(static_cast(deopt_kind))); + // a2: code address or 0 already loaded. 
+ // a3: Fp-to-sp delta already loaded. + __ li(t0, ExternalReference::isolate_address(isolate)); + __ sw(t0, CFunctionArgumentOperand(5)); // Isolate. + // Call Deoptimizer::New(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5); + } + + // Preserve "deoptimizer" object in register v0 and get the input + // frame descriptor pointer to a1 (deoptimizer->input_); + // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below. + __ mov(a0, v0); + __ lw(a1, MemOperand(v0, Deoptimizer::input_offset())); + + // Copy core registers into FrameDescription::registers_[kNumRegisters]. + DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); + for (int i = 0; i < kNumberOfRegisters; i++) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + if ((saved_regs.bits() & (1 << i)) != 0) { + __ lw(a2, MemOperand(sp, i * kPointerSize)); + __ sw(a2, MemOperand(a1, offset)); + } else if (FLAG_debug_code) { + __ li(a2, kDebugZapValue); + __ sw(a2, MemOperand(a1, offset)); + } + } + + int double_regs_offset = FrameDescription::double_registers_offset(); + // Copy FPU registers to + // double_registers_[DoubleRegister::kNumAllocatableRegisters] + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + int dst_offset = code * kDoubleSize + double_regs_offset; + int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize; + __ Ldc1(f0, MemOperand(sp, src_offset)); + __ Sdc1(f0, MemOperand(a1, dst_offset)); + } + + // Remove the saved registers from the stack. + __ Addu(sp, sp, Operand(kSavedRegistersAreaSize)); + + // Compute a pointer to the unwinding limit in register a2; that is + // the first stack slot not part of the input frame. + __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset())); + __ Addu(a2, a2, sp); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. + __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset())); + Label pop_loop; + Label pop_loop_header; + __ BranchShort(&pop_loop_header); + __ bind(&pop_loop); + __ pop(t0); + __ sw(t0, MemOperand(a3, 0)); + __ addiu(a3, a3, sizeof(uint32_t)); + __ bind(&pop_loop_header); + __ BranchShort(&pop_loop, ne, a2, Operand(sp)); + + // Compute the output frame in the deoptimizer. + __ push(a0); // Preserve deoptimizer object across call. + // a0: deoptimizer object; a1: scratch. + __ PrepareCallCFunction(1, a1); + // Call Deoptimizer::ComputeOutputFrames(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); + } + __ pop(a0); // Restore deoptimizer object (class Deoptimizer). + + __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset())); + + // Replace the current (input) frame with the output frames. + Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; + // Outer loop state: t0 = current "FrameDescription** output_", + // a1 = one past the last FrameDescription**. + __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset())); + __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_. + __ Lsa(a1, t0, a1, kPointerSizeLog2); + __ BranchShort(&outer_loop_header); + __ bind(&outer_push_loop); + // Inner loop state: a2 = current FrameDescription*, a3 = loop index. 
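Earlier in this function every saved general-purpose register was copied into FrameDescription::registers_, indexed by register code, so registers that were not saved simply leave gaps and every register keeps a fixed offset. A small stand-alone model of that copy (the struct is illustrative, not V8's FrameDescription); the loops that follow then materialize each output frame on the machine stack:

#include <array>
#include <cstdint>

// Illustrative model only.
struct FrameDescriptionModel {
  static constexpr int kNumRegisters = 32;
  std::array<uint32_t, kNumRegisters> registers_{};
};

void CopySavedRegisters(const uint32_t* stack_slots, uint32_t saved_mask,
                        FrameDescriptionModel* desc) {
  for (int code = 0; code < FrameDescriptionModel::kNumRegisters; ++code) {
    if ((saved_mask & (1u << code)) != 0) {
      // The slot index on the stack equals the register code, matching the
      // gaps left by the selective register save above.
      desc->registers_[code] = stack_slots[code];
    }
  }
}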
+ __ lw(a2, MemOperand(t0, 0)); // output_[ix] + __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset())); + __ BranchShort(&inner_loop_header); + __ bind(&inner_push_loop); + __ Subu(a3, a3, Operand(sizeof(uint32_t))); + __ Addu(t2, a2, Operand(a3)); + __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset())); + __ push(t3); + __ bind(&inner_loop_header); + __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg)); + + __ Addu(t0, t0, Operand(kPointerSize)); + __ bind(&outer_loop_header); + __ BranchShort(&outer_push_loop, lt, t0, Operand(a1)); + + __ lw(a1, MemOperand(a0, Deoptimizer::input_offset())); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister fpu_reg = DoubleRegister::from_code(code); + int src_offset = code * kDoubleSize + double_regs_offset; + __ Ldc1(fpu_reg, MemOperand(a1, src_offset)); + } + + // Push pc and continuation from the last output frame. + __ lw(t2, MemOperand(a2, FrameDescription::pc_offset())); + __ push(t2); + __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset())); + __ push(t2); + + // Technically restoring 'at' should work unless zero_reg is also restored + // but it's safer to check for this. + DCHECK(!(restored_regs.has(at))); + // Restore the registers from the last output frame. + __ mov(at, a2); + for (int i = kNumberOfRegisters - 1; i >= 0; i--) { + int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + if ((restored_regs.bits() & (1 << i)) != 0) { + __ lw(ToRegister(i), MemOperand(at, offset)); + } + } + + __ pop(at); // Get continuation, leave pc on stack. + __ pop(ra); + __ Jump(at); + __ stop(); +} + +} // namespace + +void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) { + Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager); +} + +void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { + Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy); +} + +void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) { + Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused); +} + +namespace { + +// Restarts execution either at the current or next (in execution order) +// bytecode. If there is baseline code on the shared function info, converts an +// interpreter frame into a baseline frame and continues execution in baseline +// code. Otherwise execution continues with bytecode. +void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, + bool next_bytecode, + bool is_osr = false) { + Label start; + __ bind(&start); + + // Get function from the frame. + Register closure = a1; + __ Lw(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + + // Get the Code object from the shared function info. + Register code_obj = s1; + __ Lw(code_obj, + FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ Lw(code_obj, + FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + + // Check if we have baseline code. For OSR entry it is safe to assume we + // always have baseline code. + if (!is_osr) { + Label start_with_baseline; + __ GetObjectType(code_obj, t6, t6); + __ Branch(&start_with_baseline, eq, t6, Operand(CODET_TYPE)); + + // Start with bytecode as there is no baseline code. + Builtin builtin_id = next_bytecode + ? 
Builtin::kInterpreterEnterAtNextBytecode + : Builtin::kInterpreterEnterAtBytecode; + __ Jump(masm->isolate()->builtins()->code_handle(builtin_id), + RelocInfo::CODE_TARGET); + + // Start with baseline code. + __ bind(&start_with_baseline); + } else if (FLAG_debug_code) { + __ GetObjectType(code_obj, t6, t6); + __ Assert(eq, AbortReason::kExpectedBaselineData, t6, Operand(CODET_TYPE)); + } + + if (FLAG_debug_code) { + AssertCodeIsBaseline(masm, code_obj, t2); + } + + // Replace BytecodeOffset with the feedback vector. + Register feedback_vector = a2; + __ Lw(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ Lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label install_baseline_code; + // Check if feedback vector is valid. If not, call prepare for baseline to + // allocate it. + __ GetObjectType(feedback_vector, t6, t6); + __ Branch(&install_baseline_code, ne, t6, Operand(FEEDBACK_VECTOR_TYPE)); + + // Save BytecodeOffset from the stack frame. + __ Lw(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ SmiUntag(kInterpreterBytecodeOffsetRegister); + // Replace BytecodeOffset with the feedback vector. + __ Sw(feedback_vector, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + feedback_vector = no_reg; + + // Compute baseline pc for bytecode offset. + ExternalReference get_baseline_pc_extref; + if (next_bytecode || is_osr) { + get_baseline_pc_extref = + ExternalReference::baseline_pc_for_next_executed_bytecode(); + } else { + get_baseline_pc_extref = + ExternalReference::baseline_pc_for_bytecode_offset(); + } + + Register get_baseline_pc = a3; + __ li(get_baseline_pc, get_baseline_pc_extref); + + // If the code deoptimizes during the implicit function entry stack interrupt + // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is + // not a valid bytecode offset. + // TODO(pthier): Investigate if it is feasible to handle this special case + // in TurboFan instead of here. + Label valid_bytecode_offset, function_entry_bytecode; + if (!is_osr) { + __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + + kFunctionEntryBytecodeOffset)); + } + + __ Subu(kInterpreterBytecodeOffsetRegister, + kInterpreterBytecodeOffsetRegister, + (BytecodeArray::kHeaderSize - kHeapObjectTag)); + + __ bind(&valid_bytecode_offset); + // Get bytecode array from the stack frame. + __ Lw(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + // Save the accumulator register, since it's clobbered by the below call. + __ Push(kInterpreterAccumulatorRegister); + { + Register arg_reg_1 = a0; + Register arg_reg_2 = a1; + Register arg_reg_3 = a2; + __ Move(arg_reg_1, code_obj); + __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister); + __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister); + FrameScope scope(masm, StackFrame::INTERNAL); + __ PrepareCallCFunction(3, 0, t0); + __ CallCFunction(get_baseline_pc, 3, 0); + } + __ Addu(code_obj, code_obj, kReturnRegister0); + __ Pop(kInterpreterAccumulatorRegister); + + if (is_osr) { + // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm + // Sparkplug here. + // TODO(liuyu): Remove Ld as arm64 after register reallocation. 
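Summarizing the dispatch this trampoline implements: if the shared function info already holds baseline code, the baseline PC for the current bytecode offset is computed and jumped to; otherwise (non-OSR only) execution re-enters the interpreter at the current or next bytecode. A compact illustration with invented enum names, not V8 types:

// Invented types for the sketch; V8 works with tagged objects and builtins.
enum class FunctionData { kBytecodeArray, kBaselineCode };
enum class Entry {
  kInterpreterAtBytecode,
  kInterpreterAtNextBytecode,
  kBaselineAtComputedPc
};

Entry ChooseEntry(FunctionData data, bool next_bytecode, bool is_osr) {
  // For OSR the caller guarantees baseline code exists (see the comment near
  // the top of the function), so only the non-OSR path may fall back.
  if (!is_osr && data != FunctionData::kBaselineCode) {
    return next_bytecode ? Entry::kInterpreterAtNextBytecode
                         : Entry::kInterpreterAtBytecode;
  }
  return Entry::kBaselineAtComputedPc;
}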
+ __ Lw(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister); + Generate_OSREntry(masm, code_obj, + Operand(Code::kHeaderSize - kHeapObjectTag)); + } else { + __ Addu(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag); + __ Jump(code_obj); + } + __ Trap(); // Unreachable. + + if (!is_osr) { + __ bind(&function_entry_bytecode); + // If the bytecode offset is kFunctionEntryOffset, get the start address of + // the first bytecode. + __ mov(kInterpreterBytecodeOffsetRegister, zero_reg); + if (next_bytecode) { + __ li(get_baseline_pc, + ExternalReference::baseline_pc_for_bytecode_offset()); + } + __ Branch(&valid_bytecode_offset); + } + + __ bind(&install_baseline_code); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(kInterpreterAccumulatorRegister); + __ Push(closure); + __ CallRuntime(Runtime::kInstallBaselineCode, 1); + __ Pop(kInterpreterAccumulatorRegister); + } + // Retry from the start after installing baseline code. + __ Branch(&start); +} +} // namespace + +void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, false); +} + +void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, true); +} + +void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, false, true); +} + +#undef __ + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_MIPS diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc new file mode 100644 index 00000000000000..addbe945462fa5 --- /dev/null +++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc @@ -0,0 +1,3870 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#if V8_TARGET_ARCH_RISCV64 + +#include "src/api/api-arguments.h" +#include "src/codegen/code-factory.h" +#include "src/codegen/interface-descriptors-inl.h" +#include "src/debug/debug.h" +#include "src/deoptimizer/deoptimizer.h" +#include "src/execution/frame-constants.h" +#include "src/execution/frames.h" +#include "src/logging/counters.h" +// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. 
+#include "src/codegen/macro-assembler-inl.h" +#include "src/codegen/register-configuration.h" +#include "src/codegen/riscv64/constants-riscv64.h" +#include "src/heap/heap-inl.h" +#include "src/objects/cell.h" +#include "src/objects/foreign.h" +#include "src/objects/heap-number.h" +#include "src/objects/js-generator.h" +#include "src/objects/objects-inl.h" +#include "src/objects/smi.h" +#include "src/runtime/runtime.h" +#include "src/wasm/wasm-linkage.h" +#include "src/wasm/wasm-objects.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm) + +void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) { + ASM_CODE_COMMENT(masm); + __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address)); + __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), + RelocInfo::CODE_TARGET); +} + +static void GenerateTailCallToReturnedCode(MacroAssembler* masm, + Runtime::FunctionId function_id) { + // ----------- S t a t e ------------- + // -- a0 : actual argument count + // -- a1 : target function (preserved for callee) + // -- a3 : new target (preserved for callee) + // ----------------------------------- + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Push a copy of the target function, the new target and the actual + // argument count. + // Push function as parameter to the runtime call. + __ SmiTag(kJavaScriptCallArgCountRegister); + __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister, + kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister); + + __ CallRuntime(function_id, 1); + // Use the return value before restoring a0 + __ Add64(a2, a0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Restore target function, new target and actual argument count. + __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister, + kJavaScriptCallArgCountRegister); + __ SmiUntag(kJavaScriptCallArgCountRegister); + } + + static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); + __ Jump(a2); +} + +namespace { + +enum class ArgumentsElementType { + kRaw, // Push arguments as they are. + kHandle // Dereference arguments before pushing. +}; + +void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, + Register scratch, Register scratch2, + ArgumentsElementType element_type) { + DCHECK(!AreAliased(array, argc, scratch)); + Label loop, entry; + __ Sub64(scratch, argc, Operand(kJSArgcReceiverSlots)); + __ Branch(&entry); + __ bind(&loop); + __ CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2); + __ Ld(scratch2, MemOperand(scratch2)); + if (element_type == ArgumentsElementType::kHandle) { + __ Ld(scratch2, MemOperand(scratch2)); + } + __ push(scratch2); + __ bind(&entry); + __ Add64(scratch, scratch, Operand(-1)); + __ Branch(&loop, greater_equal, scratch, Operand(zero_reg)); +} + +void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : number of arguments + // -- a1 : constructor function + // -- a3 : new target + // -- cp : context + // -- ra : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + + // Enter a construct frame. + { + FrameScope scope(masm, StackFrame::CONSTRUCT); + + // Preserve the incoming parameters on the stack. + __ SmiTag(a0); + __ Push(cp, a0); + __ SmiUntag(a0); + + // Set up pointer to first argument (skip receiver). 
+ __ Add64( + t2, fp, + Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); + // t2: Pointer to start of arguments. + // a0: Number of arguments. + { + UseScratchRegisterScope temps(masm); + temps.Include(t0); + Generate_PushArguments(masm, t2, a0, temps.Acquire(), temps.Acquire(), + ArgumentsElementType::kRaw); + } + // The receiver for the builtin/api call. + __ PushRoot(RootIndex::kTheHoleValue); + + // Call the function. + // a0: number of arguments (untagged) + // a1: constructor function + // a3: new target + __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall); + + // Restore context from the frame. + __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); + // Restore smi-tagged arguments count from the frame. + __ Ld(kScratchReg, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); + // Leave construct frame. + } + + // Remove caller arguments from the stack and return. + __ DropArguments(kScratchReg, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver, kScratchReg); + __ Ret(); +} + +} // namespace + +// The construct stub for ES5 constructor functions and ES6 class constructors. +void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0: number of arguments (untagged) + // -- a1: constructor function + // -- a3: new target + // -- cp: context + // -- ra: return address + // -- sp[...]: constructor arguments + // ----------------------------------- + UseScratchRegisterScope temps(masm); + temps.Include(t0, t1); + // Enter a construct frame. + FrameScope scope(masm, StackFrame::MANUAL); + Label post_instantiation_deopt_entry, not_create_implicit_receiver; + __ EnterFrame(StackFrame::CONSTRUCT); + + // Preserve the incoming parameters on the stack. + __ SmiTag(a0); + __ Push(cp, a0, a1); + __ PushRoot(RootIndex::kUndefinedValue); + __ Push(a3); + + // ----------- S t a t e ------------- + // -- sp[0*kSystemPointerSize]: new target + // -- sp[1*kSystemPointerSize]: padding + // -- a1 and sp[2*kSystemPointerSize]: constructor function + // -- sp[3*kSystemPointerSize]: number of arguments (tagged) + // -- sp[4*kSystemPointerSize]: context + // ----------------------------------- + { + UseScratchRegisterScope temps(masm); + Register func_info = temps.Acquire(); + __ LoadTaggedPointerField( + func_info, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ Lwu(func_info, + FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset)); + __ DecodeField(func_info); + __ JumpIfIsInRange( + func_info, + static_cast(FunctionKind::kDefaultDerivedConstructor), + static_cast(FunctionKind::kDerivedConstructor), + ¬_create_implicit_receiver); + Register scratch = func_info; + Register scratch2 = temps.Acquire(); + // If not derived class constructor: Allocate the new receiver object. 
+    __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+                        scratch, scratch2);
+    __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
+            RelocInfo::CODE_TARGET);
+    __ BranchShort(&post_instantiation_deopt_entry);
+
+    // Else: use TheHoleValue as receiver for constructor call
+    __ bind(&not_create_implicit_receiver);
+    __ LoadRoot(a0, RootIndex::kTheHoleValue);
+  }
+  // ----------- S t a t e -------------
+  // -- a0: receiver
+  // -- Slot 4 / sp[0*kSystemPointerSize]: new target
+  // -- Slot 3 / sp[1*kSystemPointerSize]: padding
+  // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
+  // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
+  // -- Slot 0 / sp[4*kSystemPointerSize]: context
+  // -----------------------------------
+  // Deoptimizer enters here.
+  masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+      masm->pc_offset());
+  __ bind(&post_instantiation_deopt_entry);
+
+  // Restore new target.
+  __ Pop(a3);
+
+  // Push the allocated receiver to the stack.
+  __ Push(a0);
+
+  // We need two copies because we may have to return the original one
+  // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments; we save it in a6
+  // since a0 will hold the constructor call's return value.
+  __ Move(a6, a0);
+
+  // Set up pointer to first argument (skip receiver).
+  __ Add64(
+      t2, fp,
+      Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+
+  // ----------- S t a t e -------------
+  // -- a3: new target
+  // -- sp[0*kSystemPointerSize]: implicit receiver
+  // -- sp[1*kSystemPointerSize]: implicit receiver
+  // -- sp[2*kSystemPointerSize]: padding
+  // -- sp[3*kSystemPointerSize]: constructor function
+  // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
+  // -- sp[5*kSystemPointerSize]: context
+  // -----------------------------------
+
+  // Restore constructor function and argument count.
+  __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+  __ Ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+  __ SmiUntag(a0);
+
+  Label stack_overflow;
+  {
+    UseScratchRegisterScope temps(masm);
+    __ StackOverflowCheck(a0, temps.Acquire(), temps.Acquire(),
+                          &stack_overflow);
+  }
+  // TODO(victorgomes): When the arguments adaptor is completely removed, we
+  // should get the formal parameter count and copy the arguments in its
+  // correct position (including any undefined), instead of delaying this to
+  // InvokeFunction.
+
+  // Copy arguments and receiver to the expression stack.
+  // t2: Pointer to start of argument.
+  // a0: Number of arguments.
+  {
+    UseScratchRegisterScope temps(masm);
+    Generate_PushArguments(masm, t2, a0, temps.Acquire(), temps.Acquire(),
+                           ArgumentsElementType::kRaw);
+  }
+  // We need two copies because we may have to return the original one
+  // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments.
+  __ Push(a6);
+
+  // Call the function.
+  __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
+
+  // ----------- S t a t e -------------
+  // -- a0: constructor result
+  // -- sp[0*kSystemPointerSize]: implicit receiver
+  // -- sp[1*kSystemPointerSize]: padding
+  // -- sp[2*kSystemPointerSize]: constructor function
+  // -- sp[3*kSystemPointerSize]: number of arguments
+  // -- sp[4*kSystemPointerSize]: context
+  // -----------------------------------
+
+  // Store offset of return address for deoptimizer.
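+  // The recorded offset is what allows a lazily deoptimized constructor call
+  // to re-enter this stub right after the invoke, with the result in a0.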
+  masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+      masm->pc_offset());
+
+  // Restore the context from the frame.
+  __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, do_throw, leave_and_return, check_receiver;
+
+  // If the result is undefined, we jump out to using the implicit receiver.
+  __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
+
+  // Otherwise we do a smi check and fall through to check if the return value
+  // is a valid receiver.
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ Ld(a0, MemOperand(sp, 0 * kSystemPointerSize));
+  __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
+
+  __ bind(&leave_and_return);
+  // Restore smi-tagged arguments count from the frame.
+  __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+  // Leave construct frame.
+  __ LeaveFrame(StackFrame::CONSTRUCT);
+
+  // Remove caller arguments from the stack and return.
+  __ DropArguments(a1, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver, a4);
+  __ Ret();
+
+  __ bind(&check_receiver);
+  __ JumpIfSmi(a0, &use_receiver);
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+  {
+    UseScratchRegisterScope temps(masm);
+    Register map = temps.Acquire(), type = temps.Acquire();
+    __ GetObjectType(a0, map, type);
+
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    __ Branch(&leave_and_return, greater_equal, type,
+              Operand(FIRST_JS_RECEIVER_TYPE));
+    __ Branch(&use_receiver);
+  }
+  __ bind(&do_throw);
+  // Restore the context from the frame.
+  __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+  __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+  __ break_(0xCC);
+
+  __ bind(&stack_overflow);
+  // Restore the context from the frame.
+  __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+  __ break_(0xCC);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSBuiltinsConstructStubHelper(masm);
+}
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+                                 Register scratch) {
+  DCHECK(!AreAliased(code, scratch));
+  // Verify that the code kind is baseline code via the CodeKind.
+  __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+  __ DecodeField<Code::KindField>(scratch);
+  __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+            Operand(static_cast<int64_t>(CodeKind::BASELINE)));
+}
+// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
+// the more general dispatch.
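+// Given the SharedFunctionInfo's function_data in |sfi_data|, the helper below
+// jumps to |is_baseline| if it already holds baseline code; if it holds an
+// InterpreterData, it unwraps it to the contained BytecodeArray in place.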
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, + Register sfi_data, + Register scratch1, + Label* is_baseline) { + ASM_CODE_COMMENT(masm); + Label done; + + __ GetObjectType(sfi_data, scratch1, scratch1); + __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE)); + + __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE), + Label::Distance::kNear); + __ LoadTaggedPointerField( + sfi_data, + FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); + + __ bind(&done); +} + +// static +void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the value to pass to the generator + // -- a1 : the JSGeneratorObject to resume + // -- ra : return address + // ----------------------------------- + + // Store input value into generator object. + __ StoreTaggedField( + a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset)); + __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0, + kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore); + // Check that a1 is still valid, RecordWrite might have clobbered it. + __ AssertGeneratorObject(a1); + + // Load suspended function and context. + __ LoadTaggedPointerField( + a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); + __ LoadTaggedPointerField(cp, + FieldMemOperand(a4, JSFunction::kContextOffset)); + + // Flood function if we are stepping. + Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; + Label stepping_prepared; + ExternalReference debug_hook = + ExternalReference::debug_hook_on_function_call_address(masm->isolate()); + __ li(a5, debug_hook); + __ Lb(a5, MemOperand(a5)); + __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg)); + + // Flood function if we need to continue stepping in the suspended generator. + ExternalReference debug_suspended_generator = + ExternalReference::debug_suspended_generator_address(masm->isolate()); + __ li(a5, debug_suspended_generator); + __ Ld(a5, MemOperand(a5)); + __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5)); + __ bind(&stepping_prepared); + + // Check the stack for overflow. We are not trying to catch interruptions + // (i.e. debug break and preemption) here, so check the "real stack limit". + Label stack_overflow; + __ LoadStackLimit(kScratchReg, + MacroAssembler::StackLimitKind::kRealStackLimit); + __ Branch(&stack_overflow, Uless, sp, Operand(kScratchReg)); + + // ----------- S t a t e ------------- + // -- a1 : the JSGeneratorObject to resume + // -- a4 : generator function + // -- cp : generator context + // -- ra : return address + // ----------------------------------- + + // Push holes for arguments to generator function. Since the parser forced + // context allocation for any variables in generators, the actual argument + // values have already been copied into the context and these dummy values + // will never be used. 
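+  // The loop below re-pushes the generator's saved parameter values from its
+  // parameters-and-registers array (a3 holds the formal parameter count,
+  // excluding the receiver), and then pushes the receiver itself.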
+ __ LoadTaggedPointerField( + a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); + __ Lhu(a3, + FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); + __ Sub64(a3, a3, Operand(kJSArgcReceiverSlots)); + __ LoadTaggedPointerField( + t1, + FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset)); + { + Label done_loop, loop; + __ bind(&loop); + __ Sub64(a3, a3, Operand(1)); + __ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear); + __ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2); + __ LoadAnyTaggedField( + kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize)); + __ Push(kScratchReg); + __ Branch(&loop); + __ bind(&done_loop); + // Push receiver. + __ LoadAnyTaggedField( + kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); + __ Push(kScratchReg); + } + + // Underlying function needs to have bytecode available. + if (FLAG_debug_code) { + Label is_baseline; + __ LoadTaggedPointerField( + a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedPointerField( + a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset)); + GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline); + __ GetObjectType(a3, a3, a3); + __ Assert(eq, AbortReason::kMissingBytecodeArray, a3, + Operand(BYTECODE_ARRAY_TYPE)); + __ bind(&is_baseline); + } + + // Resume (Ignition/TurboFan) generator object. + { + __ LoadTaggedPointerField( + a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); + __ Lhu(a0, FieldMemOperand( + a0, SharedFunctionInfo::kFormalParameterCountOffset)); + // We abuse new.target both to indicate that this is a resume call and to + // pass in the generator object. In ordinary calls, new.target is always + // undefined because generator functions are non-constructable. + __ Move(a3, a1); + __ Move(a1, a4); + static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); + __ LoadTaggedPointerField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); + __ JumpCodeObject(a2); + } + + __ bind(&prepare_step_in_if_stepping); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a1, a4); + // Push hole as receiver since we do not use it for stepping. + __ PushRoot(RootIndex::kTheHoleValue); + __ CallRuntime(Runtime::kDebugOnFunctionCall); + __ Pop(a1); + } + __ LoadTaggedPointerField( + a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); + __ Branch(&stepping_prepared); + + __ bind(&prepare_step_in_suspended_generator); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a1); + __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator); + __ Pop(a1); + } + __ LoadTaggedPointerField( + a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); + __ Branch(&stepping_prepared); + + __ bind(&stack_overflow); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kThrowStackOverflow); + __ break_(0xCC); // This should be unreachable. + } +} + +void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a1); + __ CallRuntime(Runtime::kThrowConstructedNonConstructable); +} + +// Clobbers scratch1 and scratch2; preserves all other registers. +static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc, + Register scratch1, Register scratch2) { + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. 
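+  // In effect the check below requires sp - real_stack_limit to exceed
+  // argc * kSystemPointerSize, i.e. room for argc pointer-sized slots.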
+  Label okay;
+  __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
+  // Make scratch1 the space we have left. The stack might already be
+  // overflowed here which will cause scratch1 to become negative.
+  __ Sub64(scratch1, sp, scratch1);
+  // Check if the arguments will overflow the stack.
+  __ Sll64(scratch2, argc, kSystemPointerSizeLog2);
+  __ Branch(&okay, gt, scratch1, Operand(scratch2),
+            Label::Distance::kNear);  // Signed comparison.
+
+  // Out of stack space.
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+
+  __ bind(&okay);
+}
+
+namespace {
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+//   using JSEntryFunction = GeneratedCode<Address(
+//       Address root_register_value, Address new_target, Address target,
+//       Address receiver, intptr_t argc, Address** args)>;
+// or
+//   using JSEntryFunction = GeneratedCode<Address(
+//       Address root_register_value, MicrotaskQueue* microtask_queue)>;
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+                             Builtin entry_trampoline) {
+  Label invoke, handler_entry, exit;
+
+  {
+    NoRootArrayScope no_root_array(masm);
+
+    // TODO(plind): unify the ABI description here.
+    // Registers:
+    //  either
+    //   a0: root register value
+    //   a1: entry address
+    //   a2: function
+    //   a3: receiver
+    //   a4: argc
+    //   a5: argv
+    //  or
+    //   a0: root register value
+    //   a1: microtask_queue
+
+    // Save callee saved registers on the stack.
+    __ MultiPush(kCalleeSaved | ra);
+
+    // Save callee-saved FPU registers.
+    __ MultiPushFPU(kCalleeSavedFPU);
+    // Set up the reserved register for 0.0.
+    __ LoadFPRImmediate(kDoubleRegZero, 0.0);
+
+    // Initialize the root register.
+    // C calling convention. The first argument is passed in a0.
+    __ Move(kRootRegister, a0);
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+    // Initialize the pointer cage base register.
+    __ LoadRootRelative(kPtrComprCageBaseRegister,
+                        IsolateData::cage_base_offset());
+#endif
+  }
+
+  // a1: entry address
+  // a2: function
+  // a3: receiver
+  // a4: argc
+  // a5: argv
+
+  // We build an EntryFrame.
+  __ li(s1, Operand(-1));  // Push a bad frame pointer to fail if it is used.
+  __ li(s2, Operand(StackFrame::TypeToMarker(type)));
+  __ li(s3, Operand(StackFrame::TypeToMarker(type)));
+  ExternalReference c_entry_fp = ExternalReference::Create(
+      IsolateAddressId::kCEntryFPAddress, masm->isolate());
+  __ li(s5, c_entry_fp);
+  __ Ld(s4, MemOperand(s5));
+  __ Push(s1, s2, s3, s4);
+  // Clear c_entry_fp, now we've pushed its previous value to the stack.
+  // If the c_entry_fp is not already zero and we don't clear it, the
+  // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+  // frames on top.
+  __ Sd(zero_reg, MemOperand(s5));
+  // Set up frame pointer for the frame to be pushed.
+  __ Add64(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+  // Registers:
+  //  either
+  //   a1: entry address
+  //   a2: function
+  //   a3: receiver
+  //   a4: argc
+  //   a5: argv
+  //  or
+  //   a1: microtask_queue
+  //
+  // Stack:
+  // caller fp          |
+  // function slot      | entry frame
+  // context slot       |
+  // bad fp (0xFF...F)  |
+  // callee saved registers + ra
+  // [ O32: 4 args slots]
+  // args
+
+  // If this is the outermost JS call, set js_entry_sp value.
+  Label non_outermost_js;
+  ExternalReference js_entry_sp = ExternalReference::Create(
+      IsolateAddressId::kJSEntrySPAddress, masm->isolate());
+  __ li(s1, js_entry_sp);
+  __ Ld(s2, MemOperand(s1));
+  __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg),
+            Label::Distance::kNear);
+  __ Sd(fp, MemOperand(s1));
+  __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+  Label cont;
+  __ Branch(&cont);
+  __ bind(&non_outermost_js);
+  __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
+  __ bind(&cont);
+  __ push(s3);
+
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ BranchShort(&invoke);
+  __ bind(&handler_entry);
+
+  // Store the current pc as the handler offset. It's used later to create the
+  // handler table.
+  masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel. Coming in here the
+  // fp will be invalid because the PushStackHandler below sets it to 0 to
+  // signal the existence of the JSEntry frame.
+  __ li(s1, ExternalReference::Create(
+                IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+  __ Sd(a0, MemOperand(s1));  // We come back from 'invoke'. result is in a0.
+  __ LoadRoot(a0, RootIndex::kException);
+  __ BranchShort(&exit);
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushStackHandler();
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the bal(&invoke) above, which
+  // restores all kCalleeSaved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
+  //
+  // Registers:
+  //  either
+  //   a0: root register value
+  //   a1: entry address
+  //   a2: function
+  //   a3: receiver
+  //   a4: argc
+  //   a5: argv
+  //  or
+  //   a0: root register value
+  //   a1: microtask_queue
+  //
+  // Stack:
+  // handler frame
+  // entry frame
+  // callee saved registers + ra
+  // [ O32: 4 args slots]
+  // args
+  //
+  // Invoke the function by calling through JS entry trampoline builtin and
+  // pop the faked function when we return.
+
+  Handle<Code> trampoline_code =
+      masm->isolate()->builtins()->code_handle(entry_trampoline);
+  __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+  // Unlink this frame from the handler chain.
+  __ PopStackHandler();
+
+  __ bind(&exit);  // a0 holds result
+  // Check if the current stack frame is marked as the outermost JS frame.
+  Label non_outermost_js_2;
+  __ pop(a5);
+  __ Branch(&non_outermost_js_2, ne, a5,
+            Operand(StackFrame::OUTERMOST_JSENTRY_FRAME),
+            Label::Distance::kNear);
+  __ li(a5, js_entry_sp);
+  __ Sd(zero_reg, MemOperand(a5));
+  __ bind(&non_outermost_js_2);
+
+  // Restore the top frame descriptors from the stack.
+  __ pop(a5);
+  __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+                                      masm->isolate()));
+  __ Sd(a5, MemOperand(a4));
+
+  // Reset the stack to the callee saved registers.
+  __ Add64(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  // Restore callee-saved fpu registers.
+  __ MultiPopFPU(kCalleeSavedFPU);
+
+  // Restore callee saved registers from the stack.
+  __ MultiPop(kCalleeSaved | ra);
+  // Return.
+  __ Jump(ra);
+}
+
+}  // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+  Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+  Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+                          Builtin::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+  Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+                          Builtin::kRunMicrotasksTrampoline);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // ----------- S t a t e -------------
+  // -- a1: new.target
+  // -- a2: function
+  // -- a3: receiver_pointer
+  // -- a4: argc
+  // -- a5: argv
+  // -----------------------------------
+
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Setup the context (we need to use the caller context from the isolate).
+    ExternalReference context_address = ExternalReference::Create(
+        IsolateAddressId::kContextAddress, masm->isolate());
+    __ li(cp, context_address);
+    __ Ld(cp, MemOperand(cp));
+
+    // Push the function onto the stack.
+    __ Push(a2);
+
+    // Check if we have enough stack space to push all arguments.
+    __ mv(a6, a4);
+    Generate_CheckStackOverflow(masm, a6, a0, s2);
+
+    // Copy arguments to the stack.
+    // a4: argc
+    // a5: argv, i.e. points to first arg
+    {
+      UseScratchRegisterScope temps(masm);
+      Generate_PushArguments(masm, a5, a4, temps.Acquire(), temps.Acquire(),
+                             ArgumentsElementType::kHandle);
+    }
+    // Push the receiver.
+    __ Push(a3);
+
+    // a0: argc
+    // a1: function
+    // a3: new.target
+    __ Move(a3, a1);
+    __ Move(a1, a2);
+    __ Move(a0, a4);
+
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(a4, RootIndex::kUndefinedValue);
+    __ Move(a5, a4);
+    __ Move(s1, a4);
+    __ Move(s2, a4);
+    __ Move(s3, a4);
+    __ Move(s4, a4);
+    __ Move(s5, a4);
+#ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+    __ Move(s11, a4);
+#endif
+    // s6 holds the root address. Do not clobber.
+    // s7 is cp. Do not init.
+
+    // Invoke the code.
+    Handle<Code> builtin = is_construct
+                               ? BUILTIN_CODE(masm->isolate(), Construct)
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
+
+    // Leave internal frame.
+  }
+  __ Jump(ra);
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+  // a1: microtask_queue
+  __ Move(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
+  __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+                                                Register optimized_code,
+                                                Register closure,
+                                                Register scratch1,
+                                                Register scratch2) {
+  ASM_CODE_COMMENT(masm);
+  DCHECK(!AreAliased(optimized_code, closure));
+  // Store code entry in the closure.
+  __ StoreTaggedField(optimized_code,
+                      FieldMemOperand(closure, JSFunction::kCodeOffset));
+  __ Move(scratch1, optimized_code);  // Write barrier clobbers scratch1 below.
+  __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1,
+                      kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+                      RememberedSetAction::kOmit, SmiCheck::kOmit);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+                                  Register scratch2) {
+  ASM_CODE_COMMENT(masm);
+  Register params_size = scratch1;
+
+  // Get the size of the formal parameters + receiver (in bytes).
+  __ Ld(params_size,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ Lw(params_size,
+        FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+  Register actual_params_size = scratch2;
+  Label L1;
+  // Compute the size of the actual parameters + receiver (in bytes).
+  __ Ld(actual_params_size,
+        MemOperand(fp, StandardFrameConstants::kArgCOffset));
+  __ Sll64(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
+  // If actual is bigger than formal, then we should use it to free up the stack
+  // arguments.
+  __ Branch(&L1, le, actual_params_size, Operand(params_size),
+            Label::Distance::kNear);
+  __ Move(params_size, actual_params_size);
+  __ bind(&L1);
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::INTERPRETED);
+
+  // Drop receiver + arguments.
+  __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
+}
+
+// Tail-call |function_id| if |actual_state| == |expected_state|
+static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
+                                         Register actual_state,
+                                         TieringState expected_state,
+                                         Runtime::FunctionId function_id) {
+  ASM_CODE_COMMENT(masm);
+  Label no_match;
+  __ Branch(&no_match, ne, actual_state,
+            Operand(static_cast<int>(expected_state)), Label::Distance::kNear);
+  GenerateTailCallToReturnedCode(masm, function_id);
+  __ bind(&no_match);
+}
+
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+                                      Register optimized_code_entry,
+                                      Register scratch1, Register scratch2) {
+  // ----------- S t a t e -------------
+  // -- a0 : actual argument count
+  // -- a3 : new target (preserved for callee if needed, and caller)
+  // -- a1 : target function (preserved for callee if needed, and caller)
+  // -----------------------------------
+  ASM_CODE_COMMENT(masm);
+  DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
+
+  Register closure = a1;
+  Label heal_optimized_code_slot;
+
+  // If the optimized code is cleared, go to runtime to update the optimization
+  // marker field.
+  __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+                   &heal_optimized_code_slot);
+
+  // Check if the optimized code is marked for deopt. If it is, call the
+  // runtime to clear it.
+  __ LoadTaggedPointerField(
+      a5,
+      FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+  __ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
+  __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
+  __ Branch(&heal_optimized_code_slot, ne, a5, Operand(zero_reg),
+            Label::Distance::kNear);
+
+  // Optimized code is good, get it into the closure and link the closure into
+  // the optimized functions list, then tail call the optimized code.
+  // The feedback vector is no longer used, so re-use it as a scratch
+  // register.
+  ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
+                                      scratch1, scratch2);
+
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ LoadCodeObjectEntry(a2, optimized_code_entry);
+  __ Jump(a2);
+
+  // Optimized code slot contains deoptimized code or code is cleared and
+  // optimized code marker isn't updated. Evict the code, update the marker
+  // and re-enter the closure's code.
+  __ bind(&heal_optimized_code_slot);
+  GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
+}
+
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+                              Register tiering_state) {
+  // ----------- S t a t e -------------
+  // -- a0 : actual argument count
+  // -- a3 : new target (preserved for callee if needed, and caller)
+  // -- a1 : target function (preserved for callee if needed, and caller)
+  // -- feedback vector (preserved for caller if needed)
+  // -- tiering_state : an int32 containing a non-zero optimization
+  //                    marker.
+  // -----------------------------------
+  ASM_CODE_COMMENT(masm);
+  DCHECK(!AreAliased(feedback_vector, a1, a3, tiering_state));
+
+  // TODO(v8:8394): The logging of first execution will break if
+  // feedback vectors are not allocated. We need to find a different way of
+  // logging these events if required.
+  TailCallRuntimeIfStateEquals(masm, tiering_state,
+                               TieringState::kRequestTurbofan_Synchronous,
+                               Runtime::kCompileTurbofan_Synchronous);
+  TailCallRuntimeIfStateEquals(masm, tiering_state,
+                               TieringState::kRequestTurbofan_Concurrent,
+                               Runtime::kCompileTurbofan_Concurrent);
+
+  __ stop();
+}
+
+// Advance the current bytecode offset. This simulates what all bytecode
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Register scratch2, Register scratch3,
+                                          Label* if_return) {
+  ASM_CODE_COMMENT(masm);
+  Register bytecode_size_table = scratch1;
+
+  // The bytecode offset value will be increased by one in wide and extra wide
+  // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+  // will restore the original bytecode. In order to simplify the code, we have
+  // a backup of it.
+  Register original_bytecode_offset = scratch3;
+  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
+                     bytecode_size_table, original_bytecode_offset));
+  __ Move(original_bytecode_offset, bytecode_offset);
+  __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
+
+  // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
+  Label process_bytecode, extra_wide;
+  STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
+  STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
+  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
+  STATIC_ASSERT(3 ==
+                static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
+  __ Branch(&process_bytecode, Ugreater, bytecode, Operand(3),
+            Label::Distance::kNear);
+  __ And(scratch2, bytecode, Operand(1));
+  __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg),
+            Label::Distance::kNear);
+
+  // Load the next bytecode and update table to the wide scaled table.
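+  // The size table stores one byte per bytecode in three consecutive blocks
+  // (single, wide, extra-wide operand scales), so advancing the table pointer
+  // by one or two block lengths below selects the appropriately scaled entry.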
+  __ Add64(bytecode_offset, bytecode_offset, Operand(1));
+  __ Add64(scratch2, bytecode_array, bytecode_offset);
+  __ Lbu(bytecode, MemOperand(scratch2));
+  __ Add64(bytecode_size_table, bytecode_size_table,
+           Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
+  __ BranchShort(&process_bytecode);
+
+  __ bind(&extra_wide);
+  // Load the next bytecode and update table to the extra wide scaled table.
+  __ Add64(bytecode_offset, bytecode_offset, Operand(1));
+  __ Add64(scratch2, bytecode_array, bytecode_offset);
+  __ Lbu(bytecode, MemOperand(scratch2));
+  __ Add64(bytecode_size_table, bytecode_size_table,
+           Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
+
+  __ bind(&process_bytecode);
+
+// Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)          \
+  __ Branch(if_return, eq, bytecode, \
+            Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+  // of the loop.
+  Label end, not_jump_loop;
+  __ Branch(&not_jump_loop, ne, bytecode,
+            Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)),
+            Label::Distance::kNear);
+  // We need to restore the original bytecode_offset since we might have
+  // increased it to skip the wide / extra-wide prefix bytecode.
+  __ Move(bytecode_offset, original_bytecode_offset);
+  __ BranchShort(&end);
+
+  __ bind(&not_jump_loop);
+  // Otherwise, load the size of the current bytecode and advance the offset.
+  __ Add64(scratch2, bytecode_size_table, bytecode);
+  __ Lb(scratch2, MemOperand(scratch2));
+  __ Add64(bytecode_offset, bytecode_offset, scratch2);
+
+  __ bind(&end);
+}
+
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or a tiering state that needs to be processed.
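+// The flags word is left in |optimization_state|; control transfers to
+// |has_optimized_code_or_state| whenever any optimized-code or
+// tiering-request bit is set.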
+static void LoadTieringStateAndJumpIfNeedsProcessing(
+    MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+    Label* has_optimized_code_or_state) {
+  ASM_CODE_COMMENT(masm);
+  DCHECK(!AreAliased(optimization_state, feedback_vector));
+  UseScratchRegisterScope temps(masm);
+  Register scratch = temps.Acquire();
+  __ Lw(optimization_state,
+        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+  __ And(
+      scratch, optimization_state,
+      Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
+  __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
+}
+
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+    MacroAssembler* masm, Register optimization_state,
+    Register feedback_vector) {
+  ASM_CODE_COMMENT(masm);
+  DCHECK(!AreAliased(optimization_state, feedback_vector));
+  UseScratchRegisterScope temps(masm);
+  temps.Include(t0, t1);
+  Label maybe_has_optimized_code;
+  // Check if optimized code marker is available
+  {
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.Acquire();
+    __ And(scratch, optimization_state,
+           Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
+    __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg),
+              Label::Distance::kNear);
+  }
+  Register tiering_state = optimization_state;
+  __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
+  MaybeOptimizeCode(masm, feedback_vector, tiering_state);
+
+  __ bind(&maybe_has_optimized_code);
+  Register optimized_code_entry = optimization_state;
+  __ LoadAnyTaggedField(
+      tiering_state,
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::kMaybeOptimizedCodeOffset));
+  TailCallOptimizedCodeSlot(masm, optimized_code_entry, temps.Acquire(),
+                            temps.Acquire());
+}
+
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+                                 Register bytecode_array) {
+  // Reset code age and the OSR state (optimized to a single write).
+  static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
+  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+  __ Sw(zero_reg,
+        FieldMemOperand(bytecode_array,
+                        BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+
+}  // namespace
+
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+  UseScratchRegisterScope temps(masm);
+  temps.Include({kScratchReg, kScratchReg2});
+  auto descriptor =
+      Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
+  Register closure = descriptor.GetRegisterParameter(
+      BaselineOutOfLinePrologueDescriptor::kClosure);
+  // Load the feedback vector from the closure.
+  Register feedback_vector = temps.Acquire();
+  __ Ld(feedback_vector,
+        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  if (FLAG_debug_code) {
+    UseScratchRegisterScope temps(masm);
+    Register type = temps.Acquire();
+    __ GetObjectType(feedback_vector, type, type);
+    __ Assert(eq, AbortReason::kExpectedFeedbackVector, type,
+              Operand(FEEDBACK_VECTOR_TYPE));
+  }
+
+  // Check for a tiering state.
+  Label has_optimized_code_or_state;
+  Register optimization_state = temps.Acquire();
+  LoadTieringStateAndJumpIfNeedsProcessing(
+      masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
+
+  // Increment invocation count for the function.
+ { + UseScratchRegisterScope temps(masm); + Register invocation_count = temps.Acquire(); + __ Lw(invocation_count, + FieldMemOperand(feedback_vector, + FeedbackVector::kInvocationCountOffset)); + __ Add32(invocation_count, invocation_count, Operand(1)); + __ Sw(invocation_count, + FieldMemOperand(feedback_vector, + FeedbackVector::kInvocationCountOffset)); + } + + FrameScope frame_scope(masm, StackFrame::MANUAL); + { + ASM_CODE_COMMENT_STRING(masm, "Frame Setup"); + // Normally the first thing we'd do here is Push(lr, fp), but we already + // entered the frame in BaselineCompiler::Prologue, as we had to use the + // value lr before the call to this BaselineOutOfLinePrologue builtin. + + Register callee_context = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kCalleeContext); + Register callee_js_function = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kClosure); + __ Push(callee_context, callee_js_function); + DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); + DCHECK_EQ(callee_js_function, kJSFunctionRegister); + + Register argc = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount); + // We'll use the bytecode for both code age/OSR resetting, and pushing onto + // the frame, so load it into a register. + Register bytecode_array = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); + ResetBytecodeAgeAndOsrState(masm, bytecode_array); + __ Push(argc, bytecode_array); + + // Baseline code frames store the feedback vector where interpreter would + // store the bytecode offset. + if (FLAG_debug_code) { + UseScratchRegisterScope temps(masm); + Register invocation_count = temps.Acquire(); + __ GetObjectType(feedback_vector, invocation_count, invocation_count); + __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count, + Operand(FEEDBACK_VECTOR_TYPE)); + } + // Our stack is currently aligned. We have have to push something along with + // the feedback vector to keep it that way -- we may as well start + // initialising the register frame. + // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves + // `undefined` in the accumulator register, to skip the load in the baseline + // code. + __ Push(feedback_vector); + } + + Label call_stack_guard; + Register frame_size = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kStackFrameSize); + { + ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check"); + // Stack check. This folds the checks for both the interrupt stack limit + // check and the real stack limit into one by just checking for the + // interrupt limit. The interrupt limit is either equal to the real stack + // limit or tighter. By ensuring we have space until that limit after + // building the frame we can quickly precheck both at once. + UseScratchRegisterScope temps(masm); + Register sp_minus_frame_size = temps.Acquire(); + __ Sub64(sp_minus_frame_size, sp, frame_size); + Register interrupt_limit = temps.Acquire(); + __ LoadStackLimit(interrupt_limit, + MacroAssembler::StackLimitKind::kInterruptStackLimit); + __ Branch(&call_stack_guard, Uless, sp_minus_frame_size, + Operand(interrupt_limit)); + } + + // Do "fast" return to the caller pc in lr. + // TODO(v8:11429): Document this frame setup better. + __ Ret(); + + __ bind(&has_optimized_code_or_state); + { + ASM_CODE_COMMENT_STRING(masm, "Optimized marker check"); + // Drop the frame created by the baseline call. 
+ __ Pop(ra, fp); + MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state, + feedback_vector); + __ Trap(); + } + + __ bind(&call_stack_guard); + { + ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call"); + FrameScope frame_scope(masm, StackFrame::INTERNAL); + // Save incoming new target or generator + __ Push(kJavaScriptCallNewTargetRegister); + __ SmiTag(frame_size); + __ Push(frame_size); + __ CallRuntime(Runtime::kStackGuardWithGap); + __ Pop(kJavaScriptCallNewTargetRegister); + } + __ Ret(); + temps.Exclude({kScratchReg, kScratchReg2}); +} + +// Generate code for entering a JS function with the interpreter. +// On entry to the function the receiver and arguments have been pushed on the +// stack left to right. +// +// The live registers are: +// o a0 : actual argument count +// o a1: the JS function object being called. +// o a3: the incoming new target or generator object +// o cp: our context +// o fp: the caller's frame pointer +// o sp: stack pointer +// o ra: return address +// +// The function builds an interpreter frame. See InterpreterFrameConstants in +// frames-constants.h for its layout. +void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { + Register closure = a1; + Register feedback_vector = a2; + UseScratchRegisterScope temps(masm); + temps.Include(t0, t1); + Register scratch = temps.Acquire(); + Register scratch2 = temps.Acquire(); + // Get the bytecode array from the function object and load it into + // kInterpreterBytecodeArrayRegister. + __ LoadTaggedPointerField( + kScratchReg, + FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedPointerField( + kInterpreterBytecodeArrayRegister, + FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset)); + Label is_baseline; + GetSharedFunctionInfoBytecodeOrBaseline( + masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline); + + // The bytecode array could have been flushed from the shared function info, + // if so, call into CompileLazy. + Label compile_lazy; + __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg); + __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE)); + + // Load the feedback vector from the closure. + __ LoadTaggedPointerField( + feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedPointerField( + feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label push_stack_frame; + // Check if feedback vector is valid. If valid, check for optimized code + // and update invocation count. Otherwise, setup the stack frame. + __ LoadTaggedPointerField( + a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset)); + __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE), + Label::Distance::kNear); + + // Read off the optimization state in the feedback vector, and if there + // is optimized code or an tiering state, call that instead. + Register optimization_state = a4; + __ Lw(optimization_state, + FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset)); + + // Check if the optimized code slot is not empty or has a tiering state. 
+  Label has_optimized_code_or_state;
+
+  __ And(scratch, optimization_state,
+         FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
+  __ Branch(&has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
+
+  Label not_optimized;
+  __ bind(&not_optimized);
+
+  // Increment invocation count for the function.
+  __ Lw(a4, FieldMemOperand(feedback_vector,
+                            FeedbackVector::kInvocationCountOffset));
+  __ Add32(a4, a4, Operand(1));
+  __ Sw(a4, FieldMemOperand(feedback_vector,
+                            FeedbackVector::kInvocationCountOffset));
+
+  // Open a frame scope to indicate that there is a frame on the stack. The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  __ bind(&push_stack_frame);
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ PushStandardFrame(closure);
+
+  ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
+
+  // Load initial bytecode offset.
+  __ li(kInterpreterBytecodeOffsetRegister,
+        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
+  __ Push(kInterpreterBytecodeArrayRegister, a4);
+
+  // Allocate the local and temporary register file on the stack.
+  Label stack_overflow;
+  {
+    // Load frame size (word) from the BytecodeArray object.
+    __ Lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                              BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    __ Sub64(a5, sp, Operand(a4));
+    __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
+    __ Branch(&stack_overflow, Uless, a5, Operand(a2));
+
+    // If ok, push undefined as the initial value for all register file entries.
+    Label loop_header;
+    Label loop_check;
+    __ LoadRoot(a5, RootIndex::kUndefinedValue);
+    __ BranchShort(&loop_check);
+    __ bind(&loop_header);
+    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+    __ push(a5);
+    // Continue loop if not done.
+    __ bind(&loop_check);
+    __ Sub64(a4, a4, Operand(kSystemPointerSize));
+    __ Branch(&loop_header, ge, a4, Operand(zero_reg));
+  }
+
+  // If the bytecode array has a valid incoming new target or generator object
+  // register, initialize it with incoming value which was passed in a3.
+  Label no_incoming_new_target_or_generator_register;
+  __ Lw(a5, FieldMemOperand(
+                kInterpreterBytecodeArrayRegister,
+                BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
+  __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
+            Operand(zero_reg), Label::Distance::kNear);
+  __ CalcScaledAddress(a5, fp, a5, kSystemPointerSizeLog2);
+  __ Sd(a3, MemOperand(a5));
+  __ bind(&no_incoming_new_target_or_generator_register);
+
+  // Perform interrupt stack check.
+  // TODO(solanes): Merge with the real stack limit check above.
+  Label stack_check_interrupt, after_stack_check_interrupt;
+  __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
+  __ Branch(&stack_check_interrupt, Uless, sp, Operand(a5),
+            Label::Distance::kNear);
+  __ bind(&after_stack_check_interrupt);
+
+  // Load accumulator as undefined.
+  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+  // Load the dispatch table into a register and dispatch to the bytecode
+  // handler at the current bytecode offset.
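+  // Dispatch loads the current bytecode, indexes the dispatch table with it,
+  // and calls the resulting handler entry point directly.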
+ Label do_dispatch; + __ bind(&do_dispatch); + __ li(kInterpreterDispatchTableRegister, + ExternalReference::interpreter_dispatch_table_address(masm->isolate())); + __ Add64(a1, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister); + __ Lbu(a7, MemOperand(a1)); + __ CalcScaledAddress(kScratchReg, kInterpreterDispatchTableRegister, a7, + kSystemPointerSizeLog2); + __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg)); + __ Call(kJavaScriptCallCodeStartRegister); + masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset()); + + // Any returns to the entry trampoline are either due to the return bytecode + // or the interpreter tail calling a builtin and then a dispatch. + + // Get bytecode array and bytecode offset from the stack frame. + __ Ld(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ Ld(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ SmiUntag(kInterpreterBytecodeOffsetRegister); + + // Either return, or advance to the next bytecode and dispatch. + Label do_return; + __ Add64(a1, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister); + __ Lbu(a1, MemOperand(a1)); + AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister, a1, a2, a3, + a4, &do_return); + __ Branch(&do_dispatch); + + __ bind(&do_return); + // The return value is in a0. + LeaveInterpreterFrame(masm, scratch, scratch2); + __ Jump(ra); + + __ bind(&stack_check_interrupt); + // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset + // for the call to the StackGuard. + __ li(kInterpreterBytecodeOffsetRegister, + Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag + + kFunctionEntryBytecodeOffset))); + __ Sd(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ CallRuntime(Runtime::kStackGuard); + + // After the call, restore the bytecode array, bytecode offset and accumulator + // registers again. Also, restore the bytecode offset in the stack to its + // previous value. + __ Ld(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ li(kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + + __ SmiTag(a5, kInterpreterBytecodeOffsetRegister); + __ Sd(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + + __ Branch(&after_stack_check_interrupt); + + __ bind(&has_optimized_code_or_state); + MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state, + feedback_vector); + __ bind(&is_baseline); + { + // Load the feedback vector from the closure. + __ LoadTaggedPointerField( + feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedPointerField( + feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label install_baseline_code; + // Check if feedback vector is valid. If not, call prepare for baseline to + // allocate it. + __ LoadTaggedPointerField( + scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ Branch(&install_baseline_code, ne, scratch, + Operand(FEEDBACK_VECTOR_TYPE)); + + // Check for an tiering state. 
+ LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state, + feedback_vector, + &has_optimized_code_or_state); + + // Load the baseline code into the closure. + __ Move(a2, kInterpreterBytecodeArrayRegister); + static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); + ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, scratch, scratch2); + __ JumpCodeObject(a2); + + __ bind(&install_baseline_code); + GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode); + } + + __ bind(&compile_lazy); + GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); + // Unreachable code. + __ break_(0xCC); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); + // Unreachable code. + __ break_(0xCC); +} + +static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, + Register start_address, + Register scratch) { + ASM_CODE_COMMENT(masm); + // Find the address of the last argument. + __ Sub64(scratch, num_args, Operand(1)); + __ Sll64(scratch, scratch, kSystemPointerSizeLog2); + __ Sub64(start_address, start_address, scratch); + + // Push the arguments. + __ PushArray(start_address, num_args, + TurboAssembler::PushArrayOrder::kReverse); +} + +// static +void Builtins::Generate_InterpreterPushArgsThenCallImpl( + MacroAssembler* masm, ConvertReceiverMode receiver_mode, + InterpreterPushArgsMode mode) { + DCHECK(mode != InterpreterPushArgsMode::kArrayFunction); + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a2 : the address of the first argument to be pushed. Subsequent + // arguments should be consecutive above this, in the same order as + // they are to be pushed onto the stack. + // -- a1 : the target to call (can be any Object). + // ----------------------------------- + Label stack_overflow; + if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + // The spread argument should not be pushed. + __ Sub64(a0, a0, Operand(1)); + } + + if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { + __ Sub64(a3, a0, Operand(kJSArgcReceiverSlots)); + } else { + __ Move(a3, a0); + } + __ StackOverflowCheck(a3, a4, t0, &stack_overflow); + + // This function modifies a2 and a4. + GenerateInterpreterPushArgs(masm, a3, a2, a4); + if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { + __ PushRoot(RootIndex::kUndefinedValue); + } + + if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + // Pass the spread in the register a2. + // a2 already points to the penultime argument, the spread + // is below that. + __ Ld(a2, MemOperand(a2, -kSystemPointerSize)); + } + + // Call the target. + if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread), + RelocInfo::CODE_TARGET); + } else { + __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny), + RelocInfo::CODE_TARGET); + } + + __ bind(&stack_overflow); + { + __ TailCallRuntime(Runtime::kThrowStackOverflow); + // Unreachable code. + __ break_(0xCC); + } +} + +// static +void Builtins::Generate_InterpreterPushArgsThenConstructImpl( + MacroAssembler* masm, InterpreterPushArgsMode mode) { + // ----------- S t a t e ------------- + // -- a0 : argument count + // -- a3 : new target + // -- a1 : constructor to call + // -- a2 : allocation site feedback if available, undefined otherwise. 
+ // -- a4 : address of the first argument + // ----------------------------------- + Label stack_overflow; + __ StackOverflowCheck(a0, a5, t0, &stack_overflow); + + if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + // The spread argument should not be pushed. + __ Sub64(a0, a0, Operand(1)); + } + Register argc_without_receiver = a6; + __ Sub64(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots)); + // Push the arguments, This function modifies a4 and a5. + GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5); + + // Push a slot for the receiver. + __ push(zero_reg); + + if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + // Pass the spread in the register a2. + // a4 already points to the penultimate argument, the spread + // lies in the next interpreter register. + __ Ld(a2, MemOperand(a4, -kSystemPointerSize)); + } else { + __ AssertUndefinedOrAllocationSite(a2, t0); + } + + if (mode == InterpreterPushArgsMode::kArrayFunction) { + __ AssertFunction(a1); + + // Tail call to the function-specific construct stub (still in the caller + // context at this point). + __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl), + RelocInfo::CODE_TARGET); + } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + // Call the constructor with a0, a1, and a3 unmodified. + __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread), + RelocInfo::CODE_TARGET); + } else { + DCHECK_EQ(InterpreterPushArgsMode::kOther, mode); + // Call the constructor with a0, a1, and a3 unmodified. + __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); + } + + __ bind(&stack_overflow); + { + __ TailCallRuntime(Runtime::kThrowStackOverflow); + // Unreachable code. + __ break_(0xCC); + } +} + +static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { + // Set the return address to the correct point in the interpreter entry + // trampoline. + Label builtin_trampoline, trampoline_loaded; + Smi interpreter_entry_return_pc_offset( + masm->isolate()->heap()->interpreter_entry_return_pc_offset()); + DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero()); + + // If the SFI function_data is an InterpreterData, the function will have a + // custom copy of the interpreter entry trampoline for profiling. If so, + // get the custom trampoline, otherwise grab the entry address of the global + // trampoline. + __ Ld(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + __ LoadTaggedPointerField( + t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedPointerField( + t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); + __ GetObjectType(t0, kInterpreterDispatchTableRegister, + kInterpreterDispatchTableRegister); + __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister, + Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear); + + __ LoadTaggedPointerField( + t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset)); + __ Add64(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ BranchShort(&trampoline_loaded); + + __ bind(&builtin_trampoline); + __ li(t0, ExternalReference:: + address_of_interpreter_entry_trampoline_instruction_start( + masm->isolate())); + __ Ld(t0, MemOperand(t0)); + + __ bind(&trampoline_loaded); + __ Add64(ra, t0, Operand(interpreter_entry_return_pc_offset.value())); + + // Initialize the dispatch table register. 
+ __ li(kInterpreterDispatchTableRegister, + ExternalReference::interpreter_dispatch_table_address(masm->isolate())); + + // Get the bytecode array pointer from the frame. + __ Ld(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + + if (FLAG_debug_code) { + // Check function data field is actually a BytecodeArray object. + __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg); + __ Assert(ne, + AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, + kScratchReg, Operand(zero_reg)); + __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1); + __ Assert(eq, + AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, + a1, Operand(BYTECODE_ARRAY_TYPE)); + } + + // Get the target bytecode offset from the frame. + __ SmiUntag(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + + if (FLAG_debug_code) { + Label okay; + __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag), + Label::Distance::kNear); + // Unreachable code. + __ break_(0xCC); + __ bind(&okay); + } + + // Dispatch to the target bytecode. + __ Add64(a1, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister); + __ Lbu(a7, MemOperand(a1)); + __ CalcScaledAddress(a1, kInterpreterDispatchTableRegister, a7, + kSystemPointerSizeLog2); + __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(a1)); + __ Jump(kJavaScriptCallCodeStartRegister); +} + +void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { + // Advance the current bytecode offset stored within the given interpreter + // stack frame. This simulates what all bytecode handlers do upon completion + // of the underlying operation. + __ Ld(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ Ld(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ SmiUntag(kInterpreterBytecodeOffsetRegister); + + Label enter_bytecode, function_entry_bytecode; + __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + + kFunctionEntryBytecodeOffset)); + + // Load the current bytecode. + __ Add64(a1, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister); + __ Lbu(a1, MemOperand(a1)); + + // Advance to the next bytecode. + Label if_return; + AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister, a1, a2, a3, + a4, &if_return); + + __ bind(&enter_bytecode); + // Convert new bytecode offset to a Smi and save in the stackframe. + __ SmiTag(a2, kInterpreterBytecodeOffsetRegister); + __ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + + Generate_InterpreterEnterBytecode(masm); + + __ bind(&function_entry_bytecode); + // If the code deoptimizes during the implicit function entry stack interrupt + // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is + // not a valid bytecode offset. Detect this case and advance to the first + // actual bytecode. + __ li(kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); + __ Branch(&enter_bytecode); + + // We should never take the if_return path. 
+ __ bind(&if_return); + __ Abort(AbortReason::kInvalidBytecodeAdvance); +} + +void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { + Generate_InterpreterEnterBytecode(masm); +} + +namespace { +void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, + bool java_script_builtin, + bool with_result) { + const RegisterConfiguration* config(RegisterConfiguration::Default()); + int allocatable_register_count = config->num_allocatable_general_registers(); + UseScratchRegisterScope temp(masm); + Register scratch = temp.Acquire(); + if (with_result) { + if (java_script_builtin) { + __ Move(scratch, a0); + } else { + // Overwrite the hole inserted by the deoptimizer with the return value + // from the LAZY deopt point. + __ Sd(a0, + MemOperand(sp, + config->num_allocatable_general_registers() * + kSystemPointerSize + + BuiltinContinuationFrameConstants::kFixedFrameSize)); + } + } + for (int i = allocatable_register_count - 1; i >= 0; --i) { + int code = config->GetAllocatableGeneralCode(i); + __ Pop(Register::from_code(code)); + if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) { + __ SmiUntag(Register::from_code(code)); + } + } + + if (with_result && java_script_builtin) { + // Overwrite the hole inserted by the deoptimizer with the return value from + // the LAZY deopt point. t0 contains the arguments count, the return value + // from LAZY is always the last argument. + constexpr int return_value_offset = + BuiltinContinuationFrameConstants::kFixedSlotCount - + kJSArgcReceiverSlots; + __ Add64(a0, a0, Operand(return_value_offset)); + __ CalcScaledAddress(t0, sp, a0, kSystemPointerSizeLog2); + __ Sd(scratch, MemOperand(t0)); + // Recover arguments count. + __ Sub64(a0, a0, Operand(return_value_offset)); + } + + __ Ld(fp, MemOperand( + sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. + __ Pop(t6); + __ Add64(sp, sp, + Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); + __ Pop(ra); + __ LoadEntryFromBuiltinIndex(t6); + __ Jump(t6); +} +} // namespace + +void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) { + Generate_ContinueToBuiltinHelper(masm, false, false); +} + +void Builtins::Generate_ContinueToCodeStubBuiltinWithResult( + MacroAssembler* masm) { + Generate_ContinueToBuiltinHelper(masm, false, true); +} + +void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) { + Generate_ContinueToBuiltinHelper(masm, true, false); +} + +void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult( + MacroAssembler* masm) { + Generate_ContinueToBuiltinHelper(masm, true, true); +} + +void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kNotifyDeoptimized); + } + + DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code()); + __ Ld(a0, MemOperand(sp, 0 * kSystemPointerSize)); + __ Add64(sp, sp, Operand(1 * kSystemPointerSize)); // Remove state. + __ Ret(); +} + +namespace { + +void Generate_OSREntry(MacroAssembler* masm, Register entry_address, + Operand offset = Operand(int64_t(0))) { + __ Add64(ra, entry_address, offset); + // And "return" to the OSR entry point of the function. 
+  __ Ret();
+}
+
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
+  ASM_CODE_COMMENT(masm);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kCompileOptimizedOSR);
+  }
+
+  // If the code object is null, just return to the caller.
+  __ Ret(eq, a0, Operand(Smi::zero()));
+  if (is_interpreter) {
+    // Drop the handler frame that is sitting on top of the actual
+    // JavaScript frame. This is the case when OSR is triggered from bytecode.
+    __ LeaveFrame(StackFrame::STUB);
+  }
+  // Load deoptimization data from the code object.
+  // <deopt_data> = <code>[#deoptimization_data_offset]
+  __ LoadTaggedPointerField(
+      a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+                             kHeapObjectTag));
+
+  // Load the OSR entrypoint offset from the deoptimization data.
+  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+  __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+                                     DeoptimizationData::kOsrPcOffsetIndex) -
+                                 kHeapObjectTag));
+
+  // Compute the target address = code_obj + header_size + osr_offset
+  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+  __ Add64(a0, a0, a1);
+  Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+}  // namespace
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+  return OnStackReplacement(masm, true);
+}
+
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+  __ Ld(kContextRegister,
+        MemOperand(fp, StandardFrameConstants::kContextOffset));
+  return OnStackReplacement(masm, false);
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : argc
+  //  -- sp[0]  : receiver
+  //  -- sp[8]  : thisArg
+  //  -- sp[16] : argArray
+  // -----------------------------------
+
+  Register argc = a0;
+  Register arg_array = a2;
+  Register receiver = a1;
+  Register this_arg = a5;
+  Register undefined_value = a3;
+
+  __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+  // 1. Load receiver into a1, argArray into a2 (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
+  {
+    // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+
+    __ Ld(this_arg, MemOperand(sp, kSystemPointerSize));
+    __ Ld(arg_array, MemOperand(sp, 2 * kSystemPointerSize));
+
+    Label done0, done1;
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.Acquire();
+    __ Sub64(scratch, argc, JSParameterCount(0));
+    __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
+    __ Move(arg_array, undefined_value);  // if argc == 0
+    __ Move(this_arg, undefined_value);   // if argc == 0
+    __ bind(&done0);                      // else (i.e., argc > 0)
+
+    __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear);
+    __ Move(arg_array, undefined_value);  // if argc == 1
+    __ bind(&done1);                      // else (i.e., argc > 1)
+
+    __ Ld(receiver, MemOperand(sp));
+    __ DropArgumentsAndPushNewReceiver(argc, this_arg,
+                                       MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- a2    : argArray
+  //  -- a1    : receiver
+  //  -- a3    : undefined root value
+  //  -- sp[0] : thisArg
+  // -----------------------------------
+
+  // 2. We don't need to check explicitly for callable receiver here,
+  // since that's the first thing the Call/CallWithArrayLike builtins
+  // will do.
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
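+  // Note: this covers calls such as fn.apply(thisArg) or
+  // fn.apply(thisArg, null), which behave like a call with an empty
+  // arguments list.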
+ Label no_arguments; + __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments); + __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value), + Label::Distance::kNear); + + // 4a. Apply the receiver to the given argArray. + __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), + RelocInfo::CODE_TARGET); + + // 4b. The argArray is either null or undefined, so we tail call without any + // arguments to the receiver. + __ bind(&no_arguments); + { + __ li(a0, JSParameterCount(0)); + DCHECK(receiver == a1); + __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); + } +} + +// static +void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { + // 1. Get the callable to call (passed as receiver) from the stack. + { __ Pop(a1); } + + // 2. Make sure we have at least one argument. + // a0: actual number of arguments + { + Label done; + __ Branch(&done, ne, a0, Operand(JSParameterCount(0)), + Label::Distance::kNear); + __ PushRoot(RootIndex::kUndefinedValue); + __ Add64(a0, a0, Operand(1)); + __ bind(&done); + } + + // 3. Adjust the actual number of arguments. + __ Add64(a0, a0, -1); + + // 4. Call the callable. + __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); +} + +void Builtins::Generate_ReflectApply(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : argc + // -- sp[0] : receiver + // -- sp[8] : target (if argc >= 1) + // -- sp[16] : thisArgument (if argc >= 2) + // -- sp[24] : argumentsList (if argc == 3) + // ----------------------------------- + + Register argc = a0; + Register arguments_list = a2; + Register target = a1; + Register this_argument = a5; + Register undefined_value = a3; + + __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); + + // 1. Load target into a1 (if present), argumentsList into a2 (if present), + // remove all arguments from the stack (including the receiver), and push + // thisArgument (if present) instead. + { + // Claim (3 - argc) dummy arguments form the stack, to put the stack in a + // consistent state for a simple pop operation. + + __ Ld(target, MemOperand(sp, kSystemPointerSize)); + __ Ld(this_argument, MemOperand(sp, 2 * kSystemPointerSize)); + __ Ld(arguments_list, MemOperand(sp, 3 * kSystemPointerSize)); + + Label done0, done1, done2; + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ Sub64(scratch, argc, Operand(JSParameterCount(0))); + __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear); + __ Move(arguments_list, undefined_value); // if argc == 0 + __ Move(this_argument, undefined_value); // if argc == 0 + __ Move(target, undefined_value); // if argc == 0 + __ bind(&done0); // argc != 0 + + __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear); + __ Move(arguments_list, undefined_value); // if argc == 1 + __ Move(this_argument, undefined_value); // if argc == 1 + __ bind(&done1); // argc > 1 + + __ Branch(&done2, ne, scratch, Operand(2), Label::Distance::kNear); + __ Move(arguments_list, undefined_value); // if argc == 2 + __ bind(&done2); // argc > 2 + + __ DropArgumentsAndPushNewReceiver(argc, this_argument, + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); + } + + // ----------- S t a t e ------------- + // -- a2 : argumentsList + // -- a1 : target + // -- a3 : undefined root value + // -- sp[0] : thisArgument + // ----------------------------------- + + // 2. 
We don't need to check explicitly for callable target here, + // since that's the first thing the Call/CallWithArrayLike builtins + // will do. + + // 3. Apply the target to the given argumentsList. + __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), + RelocInfo::CODE_TARGET); +} + +void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : argc + // -- sp[0] : receiver + // -- sp[8] : target + // -- sp[16] : argumentsList + // -- sp[24] : new.target (optional) + // ----------------------------------- + Register argc = a0; + Register arguments_list = a2; + Register target = a1; + Register new_target = a3; + Register undefined_value = a4; + + __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); + + // 1. Load target into a1 (if present), argumentsList into a2 (if present), + // new.target into a3 (if present, otherwise use target), remove all + // arguments from the stack (including the receiver), and push thisArgument + // (if present) instead. + { + // Claim (3 - argc) dummy arguments form the stack, to put the stack in a + // consistent state for a simple pop operation. + __ Ld(target, MemOperand(sp, kSystemPointerSize)); + __ Ld(arguments_list, MemOperand(sp, 2 * kSystemPointerSize)); + __ Ld(new_target, MemOperand(sp, 3 * kSystemPointerSize)); + + Label done0, done1, done2; + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ Sub64(scratch, argc, Operand(JSParameterCount(0))); + __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear); + __ Move(arguments_list, undefined_value); // if argc == 0 + __ Move(new_target, undefined_value); // if argc == 0 + __ Move(target, undefined_value); // if argc == 0 + __ bind(&done0); + + __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear); + __ Move(arguments_list, undefined_value); // if argc == 1 + __ Move(new_target, target); // if argc == 1 + __ bind(&done1); + + __ Branch(&done2, ne, scratch, Operand(2), Label::Distance::kNear); + __ Move(new_target, target); // if argc == 2 + __ bind(&done2); + + __ DropArgumentsAndPushNewReceiver(argc, undefined_value, + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); + } + + // ----------- S t a t e ------------- + // -- a2 : argumentsList + // -- a1 : target + // -- a3 : new.target + // -- sp[0] : receiver (undefined) + // ----------------------------------- + + // 2. We don't need to check explicitly for constructor target here, + // since that's the first thing the Construct/ConstructWithArrayLike + // builtins will do. + + // 3. We don't need to check explicitly for constructor new.target here, + // since that's the second thing the Construct/ConstructWithArrayLike + // builtins will do. + + // 4. Construct the target with the given new.target and argumentsList. + __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike), + RelocInfo::CODE_TARGET); +} + +namespace { + +// Allocate new stack space for |count| arguments and shift all existing +// arguments already on the stack. |pointer_to_new_space_out| points to the +// first free slot on the stack to copy additional arguments to and +// |argc_in_out| is updated to include |count|. 
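+// Note: the existing |argc_in_out| slots are copied down by |count| slots, so
+// afterwards the stack holds (from the new sp upwards) the shifted existing
+// arguments followed by |count| free slots, with |pointer_to_new_space_out|
+// left pointing at the first free slot.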
+void Generate_AllocateSpaceAndShiftExistingArguments( + MacroAssembler* masm, Register count, Register argc_in_out, + Register pointer_to_new_space_out) { + UseScratchRegisterScope temps(masm); + Register scratch1 = temps.Acquire(); + Register scratch2 = temps.Acquire(); + Register scratch3 = temps.Acquire(); + DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1, + scratch2)); + Register old_sp = scratch1; + Register new_space = scratch2; + __ mv(old_sp, sp); + __ slli(new_space, count, kPointerSizeLog2); + __ Sub64(sp, sp, Operand(new_space)); + + Register end = scratch2; + Register value = scratch3; + Register dest = pointer_to_new_space_out; + __ mv(dest, sp); + __ CalcScaledAddress(end, old_sp, argc_in_out, kSystemPointerSizeLog2); + Label loop, done; + __ Branch(&done, ge, old_sp, Operand(end)); + __ bind(&loop); + __ Ld(value, MemOperand(old_sp, 0)); + __ Sd(value, MemOperand(dest, 0)); + __ Add64(old_sp, old_sp, Operand(kSystemPointerSize)); + __ Add64(dest, dest, Operand(kSystemPointerSize)); + __ Branch(&loop, lt, old_sp, Operand(end)); + __ bind(&done); + + // Update total number of arguments. + __ Add64(argc_in_out, argc_in_out, count); +} + +} // namespace + +// static +void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, + Handle code) { + UseScratchRegisterScope temps(masm); + temps.Include(t1, t0); + // ----------- S t a t e ------------- + // -- a1 : target + // -- a0 : number of parameters on the stack + // -- a2 : arguments list (a FixedArray) + // -- a4 : len (number of elements to push from args) + // -- a3 : new.target (for [[Construct]]) + // ----------------------------------- + if (FLAG_debug_code) { + // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0. + Label ok, fail; + __ AssertNotSmi(a2); + __ GetObjectType(a2, kScratchReg, kScratchReg); + __ Branch(&ok, eq, kScratchReg, Operand(FIXED_ARRAY_TYPE), + Label::Distance::kNear); + __ Branch(&fail, ne, kScratchReg, Operand(FIXED_DOUBLE_ARRAY_TYPE), + Label::Distance::kNear); + __ Branch(&ok, eq, a4, Operand(zero_reg), Label::Distance::kNear); + // Fall through. + __ bind(&fail); + __ Abort(AbortReason::kOperandIsNotAFixedArray); + + __ bind(&ok); + } + + Register args = a2; + Register len = a4; + + // Check for stack overflow. + Label stack_overflow; + __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow); + + // Move the arguments already in the stack, + // including the receiver and the return address. + // a4: Number of arguments to make room for. + // a0: Number of arguments already on the stack. + // a7: Points to first free slot on the stack after arguments were shifted. + Generate_AllocateSpaceAndShiftExistingArguments(masm, a4, a0, a7); + + // Push arguments onto the stack (thisArgument is already on the stack). 
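+  // Note: elements loaded from the FixedArray may be the-hole sentinels; the
+  // loop below substitutes undefined for such holes before pushing.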
+ { + Label done, push, loop; + Register src = a6; + Register scratch = len; + UseScratchRegisterScope temps(masm); + Register hole_value = temps.Acquire(); + __ Add64(src, args, FixedArray::kHeaderSize - kHeapObjectTag); + __ Branch(&done, eq, len, Operand(zero_reg), Label::Distance::kNear); + __ Sll64(scratch, len, kTaggedSizeLog2); + __ Sub64(scratch, sp, Operand(scratch)); + __ LoadRoot(hole_value, RootIndex::kTheHoleValue); + __ bind(&loop); + __ LoadTaggedPointerField(a5, MemOperand(src)); + __ Add64(src, src, kTaggedSize); + __ Branch(&push, ne, a5, Operand(hole_value), Label::Distance::kNear); + __ LoadRoot(a5, RootIndex::kUndefinedValue); + __ bind(&push); + __ Sd(a5, MemOperand(a7, 0)); + __ Add64(a7, a7, Operand(kSystemPointerSize)); + __ Add64(scratch, scratch, Operand(kTaggedSize)); + __ Branch(&loop, ne, scratch, Operand(sp)); + __ bind(&done); + } + + // Tail-call to the actual Call or Construct builtin. + __ Jump(code, RelocInfo::CODE_TARGET); + + __ bind(&stack_overflow); + __ TailCallRuntime(Runtime::kThrowStackOverflow); +} + +// static +void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, + CallOrConstructMode mode, + Handle code) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a3 : the new.target (for [[Construct]] calls) + // -- a1 : the target to call (can be any Object) + // -- a2 : start index (to support rest parameters) + // ----------------------------------- + UseScratchRegisterScope temps(masm); + temps.Include(t0, t1); + temps.Include(t2); + // Check if new.target has a [[Construct]] internal method. + if (mode == CallOrConstructMode::kConstruct) { + Label new_target_constructor, new_target_not_constructor; + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ JumpIfSmi(a3, &new_target_not_constructor); + __ LoadTaggedPointerField(scratch, + FieldMemOperand(a3, HeapObject::kMapOffset)); + __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); + __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask)); + __ Branch(&new_target_constructor, ne, scratch, Operand(zero_reg), + Label::Distance::kNear); + __ bind(&new_target_not_constructor); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterFrame(StackFrame::INTERNAL); + __ Push(a3); + __ CallRuntime(Runtime::kThrowNotConstructor); + } + __ bind(&new_target_constructor); + } + + // TODO(victorgomes): Remove this copy when all the arguments adaptor frame + // code is erased. + __ Move(a6, fp); + __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset)); + + Label stack_done, stack_overflow; + __ Sub64(a7, a7, Operand(kJSArgcReceiverSlots)); + __ Sub64(a7, a7, a2); + __ Branch(&stack_done, le, a7, Operand(zero_reg)); + { + // Check for stack overflow. + __ StackOverflowCheck(a7, a4, a5, &stack_overflow); + + // Forward the arguments from the caller frame. + + // Point to the first argument to copy (skipping the receiver). + __ Add64(a6, a6, + Operand(CommonFrameConstants::kFixedFrameSizeAboveFp + + kSystemPointerSize)); + __ CalcScaledAddress(a6, a6, a2, kSystemPointerSizeLog2); + + // Move the arguments already in the stack, + // including the receiver and the return address. + // a7: Number of arguments to make room for. + // a0: Number of arguments already on the stack. + // a2: Points to first free slot on the stack after arguments were shifted. + Generate_AllocateSpaceAndShiftExistingArguments(masm, a7, a0, a2); + + // Copy arguments from the caller frame. 
+ // TODO(victorgomes): Consider using forward order as potentially more cache + // friendly. + { + Label loop; + __ bind(&loop); + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(), addr = temps.Acquire(); + __ Sub32(a7, a7, Operand(1)); + __ CalcScaledAddress(addr, a6, a7, kSystemPointerSizeLog2); + __ Ld(scratch, MemOperand(addr)); + __ CalcScaledAddress(addr, a2, a7, kSystemPointerSizeLog2); + __ Sd(scratch, MemOperand(addr)); + __ Branch(&loop, ne, a7, Operand(zero_reg)); + } + } + } + __ BranchShort(&stack_done); + __ bind(&stack_overflow); + __ TailCallRuntime(Runtime::kThrowStackOverflow); + __ bind(&stack_done); + + // Tail-call to the {code} handler. + __ Jump(code, RelocInfo::CODE_TARGET); +} + +// static +void Builtins::Generate_CallFunction(MacroAssembler* masm, + ConvertReceiverMode mode) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the function to call (checked to be a JSFunction) + // ----------------------------------- + __ AssertCallableFunction(a1); + + Label class_constructor; + __ LoadTaggedPointerField( + a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); + __ And(kScratchReg, a3, + Operand(SharedFunctionInfo::IsClassConstructorBit::kMask)); + __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg)); + + // Enter the context of the function; ToObject has to run in the function + // context, and we also need to take the global proxy from the function + // context in case of conversion. + __ LoadTaggedPointerField(cp, + FieldMemOperand(a1, JSFunction::kContextOffset)); + // We need to convert the receiver for non-native sloppy mode functions. + Label done_convert; + __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); + __ And(kScratchReg, a3, + Operand(SharedFunctionInfo::IsNativeBit::kMask | + SharedFunctionInfo::IsStrictBit::kMask)); + __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg)); + { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the function to call (checked to be a JSFunction) + // -- a2 : the shared function info. + // -- cp : the function context. + // ----------------------------------- + + if (mode == ConvertReceiverMode::kNullOrUndefined) { + // Patch receiver to global proxy. + __ LoadGlobalProxy(a3); + } else { + Label convert_to_object, convert_receiver; + __ LoadReceiver(a3, a0); + __ JumpIfSmi(a3, &convert_to_object); + STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); + __ GetObjectType(a3, a4, a4); + __ Branch(&done_convert, Ugreater_equal, a4, + Operand(FIRST_JS_RECEIVER_TYPE)); + if (mode != ConvertReceiverMode::kNotNullOrUndefined) { + Label convert_global_proxy; + __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy); + __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object); + __ bind(&convert_global_proxy); + { + // Patch receiver to global proxy. + __ LoadGlobalProxy(a3); + } + __ Branch(&convert_receiver); + } + __ bind(&convert_to_object); + { + // Convert receiver using ToObject. + // TODO(bmeurer): Inline the allocation here to avoid building the frame + // in the fast case? (fall back to AllocateInNewSpace?) 
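+        // Note: the argument count and function are saved across the ToObject
+        // call below, since the call may trigger GC and clobbers the argument
+        // registers; a0 is Smi-tagged first so the saved count is a valid
+        // tagged value on the stack.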
+ FrameScope scope(masm, StackFrame::INTERNAL); + __ SmiTag(a0); + __ Push(a0, a1); + __ Move(a0, a3); + __ Push(cp); + __ Call(BUILTIN_CODE(masm->isolate(), ToObject), + RelocInfo::CODE_TARGET); + __ Pop(cp); + __ Move(a3, a0); + __ Pop(a0, a1); + __ SmiUntag(a0); + } + __ LoadTaggedPointerField( + a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ bind(&convert_receiver); + } + __ StoreReceiver(a3, a0, kScratchReg); + } + __ bind(&done_convert); + + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the function to call (checked to be a JSFunction) + // -- a2 : the shared function info. + // -- cp : the function context. + // ----------------------------------- + + __ Lhu(a2, + FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); + __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump); + + // The function is a "classConstructor", need to raise an exception. + __ bind(&class_constructor); + { + FrameScope frame(masm, StackFrame::INTERNAL); + __ Push(a1); + __ CallRuntime(Runtime::kThrowConstructorNonCallableError); + } +} + +namespace { + +void Generate_PushBoundArguments(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : target (checked to be a JSBoundFunction) + // -- a3 : new.target (only in case of [[Construct]]) + // ----------------------------------- + UseScratchRegisterScope temps(masm); + temps.Include(t0, t1); + Register bound_argc = a4; + Register bound_argv = a2; + // Load [[BoundArguments]] into a2 and length of that into a4. + Label no_bound_arguments; + __ LoadTaggedPointerField( + bound_argv, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); + __ SmiUntagField(bound_argc, + FieldMemOperand(bound_argv, FixedArray::kLengthOffset)); + __ Branch(&no_bound_arguments, eq, bound_argc, Operand(zero_reg)); + { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : target (checked to be a JSBoundFunction) + // -- a2 : the [[BoundArguments]] (implemented as FixedArray) + // -- a3 : new.target (only in case of [[Construct]]) + // -- a4: the number of [[BoundArguments]] + // ----------------------------------- + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + Label done; + // Reserve stack space for the [[BoundArguments]]. + { + // Check the stack for overflow. We are not trying to catch interruptions + // (i.e. debug break and preemption) here, so check the "real stack + // limit". + __ StackOverflowCheck(a4, temps.Acquire(), temps.Acquire(), nullptr, + &done); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterFrame(StackFrame::INTERNAL); + __ CallRuntime(Runtime::kThrowStackOverflow); + } + __ bind(&done); + } + + // Pop receiver. + __ Pop(scratch); + + // Push [[BoundArguments]]. + { + Label loop, done_loop; + __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); + __ Add64(a0, a0, Operand(a4)); + __ Add64(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ bind(&loop); + __ Sub64(a4, a4, Operand(1)); + __ Branch(&done_loop, lt, a4, Operand(zero_reg), Label::Distance::kNear); + __ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2); + __ LoadAnyTaggedField(kScratchReg, MemOperand(a5)); + __ Push(kScratchReg); + __ Branch(&loop); + __ bind(&done_loop); + } + + // Push receiver. 
+ __ Push(scratch); + } + __ bind(&no_bound_arguments); +} + +} // namespace + +// static +void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the function to call (checked to be a JSBoundFunction) + // ----------------------------------- + __ AssertBoundFunction(a1); + + // Patch the receiver to [[BoundThis]]. + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ LoadAnyTaggedField( + scratch, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset)); + __ StoreReceiver(scratch, a0, kScratchReg); + } + + // Push the [[BoundArguments]] onto the stack. + Generate_PushBoundArguments(masm); + + // Call the [[BoundTargetFunction]] via the Call builtin. + __ LoadTaggedPointerField( + a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); + __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), + RelocInfo::CODE_TARGET); +} + +// static +void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the target to call (can be any Object). + // ----------------------------------- + + Label non_callable, class_constructor; + UseScratchRegisterScope temps(masm); + temps.Include(t1, t2); + temps.Include(t4); + Register map = temps.Acquire(), type = temps.Acquire(), + range = temps.Acquire(); + __ JumpIfSmi(a1, &non_callable); + __ LoadMap(map, a1); + __ GetInstanceTypeRange(map, type, FIRST_CALLABLE_JS_FUNCTION_TYPE, range); + __ Jump(masm->isolate()->builtins()->CallFunction(mode), + RelocInfo::CODE_TARGET, Uless_equal, range, + Operand(LAST_CALLABLE_JS_FUNCTION_TYPE - + FIRST_CALLABLE_JS_FUNCTION_TYPE)); + __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction), + RelocInfo::CODE_TARGET, eq, type, Operand(JS_BOUND_FUNCTION_TYPE)); + Register scratch = map; + // Check if target has a [[Call]] internal method. + __ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(scratch, scratch, Operand(Map::Bits1::IsCallableBit::kMask)); + __ Branch(&non_callable, eq, scratch, Operand(zero_reg), + Label::Distance::kNear); + + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq, + type, Operand(JS_PROXY_TYPE)); + + // Check if target is a wrapped function and call CallWrappedFunction external + // builtin + __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction), + RelocInfo::CODE_TARGET, eq, type, Operand(JS_WRAPPED_FUNCTION_TYPE)); + + // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) + // Check that the function is not a "classConstructor". + __ Branch(&class_constructor, eq, type, Operand(JS_CLASS_CONSTRUCTOR_TYPE)); + + // 2. Call to something else, which might have a [[Call]] internal method (if + // not we raise an exception). + // Overwrite the original receiver with the (original) target. + __ StoreReceiver(a1, a0, kScratchReg); + // Let the "call_as_function_delegate" take care of the rest. + __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX); + __ Jump(masm->isolate()->builtins()->CallFunction( + ConvertReceiverMode::kNotNullOrUndefined), + RelocInfo::CODE_TARGET); + + // 3. Call to something that is not callable. + __ bind(&non_callable); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a1); + __ CallRuntime(Runtime::kThrowCalledNonCallable); + } + // 4. The function is a "classConstructor", need to raise an exception. 
+ __ bind(&class_constructor); + { + FrameScope frame(masm, StackFrame::INTERNAL); + __ Push(a1); + __ CallRuntime(Runtime::kThrowConstructorNonCallableError); + __ ebreak(); + } +} + +void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the constructor to call (checked to be a JSFunction) + // -- a3 : the new target (checked to be a constructor) + // ----------------------------------- + __ AssertConstructor(a1); + __ AssertFunction(a1); + + // Calling convention for function specific ConstructStubs require + // a2 to contain either an AllocationSite or undefined. + __ LoadRoot(a2, RootIndex::kUndefinedValue); + + Label call_generic_stub; + + // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. + __ LoadTaggedPointerField( + a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ Lwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset)); + __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask)); + __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg), + Label::Distance::kNear); + + __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub), + RelocInfo::CODE_TARGET); + + __ bind(&call_generic_stub); + __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric), + RelocInfo::CODE_TARGET); +} + +// static +void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the function to call (checked to be a JSBoundFunction) + // -- a3 : the new target (checked to be a constructor) + // ----------------------------------- + __ AssertBoundFunction(a1); + + // Push the [[BoundArguments]] onto the stack. + Generate_PushBoundArguments(masm); + + // Patch new.target to [[BoundTargetFunction]] if new.target equals target. + Label skip; + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ CmpTagged(scratch, a1, a3); + __ Branch(&skip, ne, scratch, Operand(zero_reg), Label::Distance::kNear); + } + __ LoadTaggedPointerField( + a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); + __ bind(&skip); + + // Construct the [[BoundTargetFunction]] via the Construct builtin. + __ LoadTaggedPointerField( + a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); + __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); +} + +// static +void Builtins::Generate_Construct(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments + // -- a1 : the constructor to call (can be any Object) + // -- a3 : the new target (either the same as the constructor or + // the JSFunction on which new was invoked initially) + // ----------------------------------- + + // Check if target is a Smi. + Label non_constructor, non_proxy; + __ JumpIfSmi(a1, &non_constructor); + + // Check if target has a [[Construct]] internal method. + UseScratchRegisterScope temps(masm); + temps.Include(t0, t1); + Register map = temps.Acquire(); + Register scratch = temps.Acquire(); + __ LoadTaggedPointerField(map, FieldMemOperand(a1, HeapObject::kMapOffset)); + __ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask)); + __ Branch(&non_constructor, eq, scratch, Operand(zero_reg)); + Register range = temps.Acquire(); + // Dispatch based on instance type. 
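+  // Note: GetInstanceTypeRange subtracts FIRST_JS_FUNCTION_TYPE from the
+  // instance type, so a single unsigned comparison against
+  // LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE below covers all
+  // JSFunction instance types.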
+  __ GetInstanceTypeRange(map, scratch, FIRST_JS_FUNCTION_TYPE, range);
+  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+          RelocInfo::CODE_TARGET, Uless_equal, range,
+          Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
+          RelocInfo::CODE_TARGET, eq, scratch, Operand(JS_BOUND_FUNCTION_TYPE));
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ Branch(&non_proxy, ne, scratch, Operand(JS_PROXY_TYPE),
+            Label::Distance::kNear);
+  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
+          RelocInfo::CODE_TARGET);
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  __ bind(&non_proxy);
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ StoreReceiver(a1, a0, kScratchReg);
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
+          RelocInfo::CODE_TARGET);
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+  // The function index was put in t0 by the jump table trampoline.
+  // Convert to Smi for the runtime call.
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
+
+  RegList kSavedGpRegs = ([]() constexpr {
+    RegList saved_gp_regs;
+    for (Register gp_param_reg : wasm::kGpParamRegisters) {
+      saved_gp_regs.set(gp_param_reg);
+    }
+
+    // All set registers were unique.
+    CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
+    // The Wasm instance must be part of the saved registers.
+    CHECK(saved_gp_regs.has(kWasmInstanceRegister));
+    CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
+             saved_gp_regs.Count());
+    return saved_gp_regs;
+  })();
+
+  DoubleRegList kSavedFpRegs = ([]() constexpr {
+    DoubleRegList saved_fp_regs;
+    for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
+      saved_fp_regs.set(fp_param_reg);
+    }
+
+    CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
+    CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
+             saved_fp_regs.Count());
+    return saved_fp_regs;
+  })();
+
+  {
+    HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
+    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+
+    __ MultiPush(kSavedGpRegs);
+    __ MultiPushFPU(kSavedFpRegs);
+
+    // Pass instance and function index as explicit arguments to the runtime
+    // function.
+    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
+    // Initialize the JavaScript context with 0. CEntry will use it to
+    // set the current context on the isolate.
+    __ Move(kContextRegister, Smi::zero());
+    __ CallRuntime(Runtime::kWasmCompileLazy, 2);
+
+    __ SmiUntag(s1, a0);  // move return value to s1 since a0 will be restored
+                          // to the value before the call
+    CHECK(!kSavedGpRegs.has(s1));
+
+    // Restore registers.
+    __ MultiPopFPU(kSavedFpRegs);
+    __ MultiPop(kSavedGpRegs);
+  }
+
+  // The runtime function returned the jump table slot offset as a Smi (now in
+  // s1). Use that to compute the jump target.
+ __ Ld(kScratchReg, + MemOperand(kWasmInstanceRegister, + WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag)); + __ Add64(s1, s1, Operand(kScratchReg)); + // Finally, jump to the entrypoint. + __ Jump(s1); +} + +void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { + HardAbortScope hard_abort(masm); // Avoid calls to Abort. + { + FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK); + + // Save all parameter registers. They might hold live values, we restore + // them after the runtime call. + __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs); + __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); + + // Initialize the JavaScript context with 0. CEntry will use it to + // set the current context on the isolate. + __ Move(cp, Smi::zero()); + __ CallRuntime(Runtime::kWasmDebugBreak, 0); + + // Restore registers. + __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); + __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs); + } + __ Ret(); +} +#endif // V8_ENABLE_WEBASSEMBLY + +void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, + SaveFPRegsMode save_doubles, ArgvMode argv_mode, + bool builtin_exit_frame) { + // Called from JavaScript; parameters are on stack as if calling JS function + // a0: number of arguments including receiver + // a1: pointer to builtin function + // fp: frame pointer (restored after C call) + // sp: stack pointer (restored as callee's sp after C call) + // cp: current context (C callee-saved) + // + // If argv_mode == ArgvMode::kRegister: + // a2: pointer to the first argument + + if (argv_mode == ArgvMode::kRegister) { + // Move argv into the correct register. + __ Move(s1, a2); + } else { + // Compute the argv pointer in a callee-saved register. + __ CalcScaledAddress(s1, sp, a0, kSystemPointerSizeLog2); + __ Sub64(s1, s1, kSystemPointerSize); + } + + // Enter the exit frame that transitions from JavaScript to C++. + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterExitFrame( + save_doubles == SaveFPRegsMode::kSave, 0, + builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); + + // s3: number of arguments including receiver (C callee-saved) + // s1: pointer to first argument (C callee-saved) + // s2: pointer to builtin function (C callee-saved) + + // Prepare arguments for C routine. + // a0 = argc + __ Move(s3, a0); + __ Move(s2, a1); + + // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We + // also need to reserve the 4 argument slots on the stack. + + __ AssertStackIsAligned(); + + // a0 = argc, a1 = argv, a2 = isolate + __ li(a2, ExternalReference::isolate_address(masm->isolate())); + __ Move(a1, s1); + + __ StoreReturnAddressAndCall(s2); + + // Result returned in a0 or a1:a0 - do not destroy these registers! + + // Check result for exception sentinel. + Label exception_returned; + __ LoadRoot(a4, RootIndex::kException); + __ Branch(&exception_returned, eq, a4, Operand(a0)); + + // Check that there is no pending exception, otherwise we + // should have returned the exception sentinel. + if (FLAG_debug_code) { + Label okay; + ExternalReference pending_exception_address = ExternalReference::Create( + IsolateAddressId::kPendingExceptionAddress, masm->isolate()); + __ li(a2, pending_exception_address); + __ Ld(a2, MemOperand(a2)); + __ LoadRoot(a4, RootIndex::kTheHoleValue); + // Cannot use check here as it attempts to generate call into runtime. 
+ __ Branch(&okay, eq, a4, Operand(a2), Label::Distance::kNear); + __ stop(); + __ bind(&okay); + } + + // Exit C frame and return. + // a0:a1: result + // sp: stack pointer + // fp: frame pointer + Register argc = argv_mode == ArgvMode::kRegister + // We don't want to pop arguments so set argc to no_reg. + ? no_reg + // s3: still holds argc (callee-saved). + : s3; + __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN); + + // Handling of exception. + __ bind(&exception_returned); + + ExternalReference pending_handler_context_address = ExternalReference::Create( + IsolateAddressId::kPendingHandlerContextAddress, masm->isolate()); + ExternalReference pending_handler_entrypoint_address = + ExternalReference::Create( + IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate()); + ExternalReference pending_handler_fp_address = ExternalReference::Create( + IsolateAddressId::kPendingHandlerFPAddress, masm->isolate()); + ExternalReference pending_handler_sp_address = ExternalReference::Create( + IsolateAddressId::kPendingHandlerSPAddress, masm->isolate()); + + // Ask the runtime for help to determine the handler. This will set a0 to + // contain the current pending exception, don't clobber it. + ExternalReference find_handler = + ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(3, 0, a0); + __ Move(a0, zero_reg); + __ Move(a1, zero_reg); + __ li(a2, ExternalReference::isolate_address(masm->isolate())); + __ CallCFunction(find_handler, 3); + } + + // Retrieve the handler context, SP and FP. + __ li(cp, pending_handler_context_address); + __ Ld(cp, MemOperand(cp)); + __ li(sp, pending_handler_sp_address); + __ Ld(sp, MemOperand(sp)); + __ li(fp, pending_handler_fp_address); + __ Ld(fp, MemOperand(fp)); + + // If the handler is a JS frame, restore the context to the frame. Note that + // the context will be set to (cp == 0) for non-JS frames. + Label zero; + __ Branch(&zero, eq, cp, Operand(zero_reg), Label::Distance::kNear); + __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ bind(&zero); + + // Compute the handler entry address and jump to it. + UseScratchRegisterScope temp(masm); + Register scratch = temp.Acquire(); + __ li(scratch, pending_handler_entrypoint_address); + __ Ld(scratch, MemOperand(scratch)); + __ Jump(scratch); +} + +void Builtins::Generate_DoubleToI(MacroAssembler* masm) { + Label done; + Register result_reg = t0; + + Register scratch = GetRegisterThatIsNotOneOf(result_reg); + Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch); + Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2); + DoubleRegister double_scratch = kScratchDoubleReg; + + // Account for saved regs. + const int kArgumentOffset = 4 * kSystemPointerSize; + + __ Push(result_reg); + __ Push(scratch, scratch2, scratch3); + + // Load double input. + __ LoadDouble(double_scratch, MemOperand(sp, kArgumentOffset)); + + // Try a conversion to a signed integer, if exception occurs, scratch is + // set to 0 + __ Trunc_w_d(scratch3, double_scratch, scratch); + + // If we had no exceptions then set result_reg and we are done. + Label error; + __ Branch(&error, eq, scratch, Operand(zero_reg), Label::Distance::kNear); + __ Move(result_reg, scratch3); + __ Branch(&done); + __ bind(&error); + + // Load the double value and perform a manual truncation. 
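+  // Note: the slow path below re-reads the double as its high and low 32-bit
+  // words, extracts the biased exponent, returns 0 for NaN/infinity and for
+  // values whose low 32 integer bits are all zero, and otherwise shifts the
+  // mantissa into place and applies the sign to form the truncated int32
+  // result.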
+ Register input_high = scratch2; + Register input_low = scratch3; + + __ Lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset)); + __ Lw(input_high, + MemOperand(sp, kArgumentOffset + Register::kExponentOffset)); + + Label normal_exponent; + // Extract the biased exponent in result. + __ ExtractBits(result_reg, input_high, HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + + // Check for Infinity and NaNs, which should return 0. + __ Sub32(scratch, result_reg, HeapNumber::kExponentMask); + __ LoadZeroIfConditionZero( + result_reg, + scratch); // result_reg = scratch == 0 ? 0 : result_reg + __ Branch(&done, eq, scratch, Operand(zero_reg)); + + // Express exponent as delta to (number of mantissa bits + 31). + __ Sub32(result_reg, result_reg, + Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31)); + + // If the delta is strictly positive, all bits would be shifted away, + // which means that we can return 0. + __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg), + Label::Distance::kNear); + __ Move(result_reg, zero_reg); + __ Branch(&done); + + __ bind(&normal_exponent); + const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; + // Calculate shift. + __ Add32(scratch, result_reg, + Operand(kShiftBase + HeapNumber::kMantissaBits)); + + // Save the sign. + Register sign = result_reg; + result_reg = no_reg; + __ And(sign, input_high, Operand(HeapNumber::kSignMask)); + + // We must specially handle shifts greater than 31. + Label high_shift_needed, high_shift_done; + __ Branch(&high_shift_needed, lt, scratch, Operand(32), + Label::Distance::kNear); + __ Move(input_high, zero_reg); + __ BranchShort(&high_shift_done); + __ bind(&high_shift_needed); + + // Set the implicit 1 before the mantissa part in input_high. + __ Or(input_high, input_high, + Operand(1 << HeapNumber::kMantissaBitsInTopWord)); + // Shift the mantissa bits to the correct position. + // We don't need to clear non-mantissa bits as they will be shifted away. + // If they weren't, it would mean that the answer is in the 32bit range. + __ Sll32(input_high, input_high, scratch); + + __ bind(&high_shift_done); + + // Replace the shifted bits with bits from the lower mantissa word. + Label pos_shift, shift_done, sign_negative; + __ li(kScratchReg, 32); + __ subw(scratch, kScratchReg, scratch); + __ Branch(&pos_shift, ge, scratch, Operand(zero_reg), Label::Distance::kNear); + + // Negate scratch. + __ Sub32(scratch, zero_reg, scratch); + __ Sll32(input_low, input_low, scratch); + __ BranchShort(&shift_done); + + __ bind(&pos_shift); + __ srlw(input_low, input_low, scratch); + + __ bind(&shift_done); + __ Or(input_high, input_high, Operand(input_low)); + // Restore sign if necessary. + __ Move(scratch, sign); + result_reg = sign; + sign = no_reg; + __ Sub32(result_reg, zero_reg, input_high); + __ Branch(&sign_negative, ne, scratch, Operand(zero_reg), + Label::Distance::kNear); + __ Move(result_reg, input_high); + __ bind(&sign_negative); + + __ bind(&done); + + __ Sd(result_reg, MemOperand(sp, kArgumentOffset)); + __ Pop(scratch, scratch2, scratch3); + __ Pop(result_reg); + __ Ret(); +} + +void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { + // TODO(v8:10701): Implement for this platform. + __ Trap(); +} + +void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. 
+  __ Trap();
+}
+
+void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
+  // TODO(v8:12191): Implement for this platform.
+  __ Trap();
+}
+
+void Builtins::Generate_WasmResume(MacroAssembler* masm) {
+  // TODO(v8:12191): Implement for this platform.
+  __ Trap();
+}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+  // Only needed on x64.
+  __ Trap();
+}
+namespace {
+
+int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+  int64_t offset = (ref0.address() - ref1.address());
+  DCHECK(static_cast<int>(offset) == offset);
+  return static_cast<int>(offset);
+}
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Restores context. stack_space
+// - space to be unwound on exit (includes the call JS arguments space and
+// the additional space allocated for the fast call).
+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
+                              ExternalReference thunk_ref, int stack_space,
+                              MemOperand* stack_space_operand,
+                              MemOperand return_value_operand) {
+  ASM_CODE_COMMENT(masm);
+  Isolate* isolate = masm->isolate();
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address(isolate);
+  const int kNextOffset = 0;
+  const int kLimitOffset = AddressOffset(
+      ExternalReference::handle_scope_limit_address(isolate), next_address);
+  const int kLevelOffset = AddressOffset(
+      ExternalReference::handle_scope_level_address(isolate), next_address);
+
+  DCHECK(function_address == a1 || function_address == a2);
+
+  Label profiler_enabled, end_profiler_check;
+  {
+    UseScratchRegisterScope temp(masm);
+    Register scratch = temp.Acquire();
+    __ li(scratch, ExternalReference::is_profiling_address(isolate));
+    __ Lb(scratch, MemOperand(scratch, 0));
+    __ Branch(&profiler_enabled, ne, scratch, Operand(zero_reg),
+              Label::Distance::kNear);
+    __ li(scratch, ExternalReference::address_of_runtime_stats_flag());
+    __ Lw(scratch, MemOperand(scratch, 0));
+    __ Branch(&profiler_enabled, ne, scratch, Operand(zero_reg),
+              Label::Distance::kNear);
+    {
+      // Call the api function directly.
+      __ Move(scratch, function_address);
+      __ BranchShort(&end_profiler_check);
+    }
+
+    __ bind(&profiler_enabled);
+    {
+      // Additional parameter is the address of the actual callback.
+      __ li(scratch, thunk_ref);
+    }
+    __ bind(&end_profiler_check);
+
+    // Allocate HandleScope in callee-save registers.
+    __ li(s5, next_address);
+    __ Ld(s3, MemOperand(s5, kNextOffset));
+    __ Ld(s1, MemOperand(s5, kLimitOffset));
+    __ Lw(s2, MemOperand(s5, kLevelOffset));
+    __ Add32(s2, s2, Operand(1));
+    __ Sw(s2, MemOperand(s5, kLevelOffset));
+
+    __ StoreReturnAddressAndCall(scratch);
+  }
+
+  Label promote_scheduled_exception;
+  Label delete_allocated_handles;
+  Label leave_exit_frame;
+  Label return_value_loaded;
+
+  // Load value from ReturnValue.
+  __ Ld(a0, return_value_operand);
+  __ bind(&return_value_loaded);
+
+  // No more valid handles (the result handle was the last one). Restore
+  // previous handle scope.
+  __ Sd(s3, MemOperand(s5, kNextOffset));
+  if (FLAG_debug_code) {
+    __ Lw(a1, MemOperand(s5, kLevelOffset));
+    __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
+             Operand(s2));
+  }
+  __ Sub32(s2, s2, Operand(1));
+  __ Sw(s2, MemOperand(s5, kLevelOffset));
+  __ Ld(kScratchReg, MemOperand(s5, kLimitOffset));
+  __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
+
+  // Leave the API exit frame.
+ __ bind(&leave_exit_frame); + + if (stack_space_operand == nullptr) { + DCHECK_NE(stack_space, 0); + __ li(s3, Operand(stack_space)); + } else { + DCHECK_EQ(stack_space, 0); + STATIC_ASSERT(kCArgSlotCount == 0); + __ Ld(s3, *stack_space_operand); + } + + static constexpr bool kDontSaveDoubles = false; + static constexpr bool kRegisterContainsSlotCount = false; + __ LeaveExitFrame(kDontSaveDoubles, s3, NO_EMIT_RETURN, + kRegisterContainsSlotCount); + + // Check if the function scheduled an exception. + __ LoadRoot(a4, RootIndex::kTheHoleValue); + __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate)); + __ Ld(a5, MemOperand(kScratchReg)); + __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5), + Label::Distance::kNear); + + __ Ret(); + + // Re-throw by promoting a scheduled exception. + __ bind(&promote_scheduled_exception); + __ TailCallRuntime(Runtime::kPromoteScheduledException); + + // HandleScope limit has changed. Delete allocated extensions. + __ bind(&delete_allocated_handles); + __ Sd(s1, MemOperand(s5, kLimitOffset)); + __ Move(s3, a0); + __ PrepareCallCFunction(1, s1); + __ li(a0, ExternalReference::isolate_address(isolate)); + __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1); + __ Move(a0, s3); + __ Branch(&leave_exit_frame); +} + +} // namespace + +void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- cp : context + // -- a1 : api function address + // -- a2 : arguments count + // -- a3 : call data + // -- a0 : holder + // -- + // -- sp[0] : receiver + // -- sp[8] : first argument + // -- ... + // -- sp[(argc) * 8] : last argument + // ----------------------------------- + UseScratchRegisterScope temps(masm); + temps.Include(t0, t1); + Register api_function_address = a1; + Register argc = a2; + Register call_data = a3; + Register holder = a0; + Register scratch = temps.Acquire(); + Register base = temps.Acquire(); // For addressing MemOperands on the stack. + + DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch, + base)); + + using FCA = FunctionCallbackArguments; + + STATIC_ASSERT(FCA::kArgsLength == 6); + STATIC_ASSERT(FCA::kNewTargetIndex == 5); + STATIC_ASSERT(FCA::kDataIndex == 4); + STATIC_ASSERT(FCA::kReturnValueOffset == 3); + STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); + STATIC_ASSERT(FCA::kIsolateIndex == 1); + STATIC_ASSERT(FCA::kHolderIndex == 0); + + // Set up FunctionCallbackInfo's implicit_args on the stack as follows: + // + // Target state: + // sp[0 * kSystemPointerSize]: kHolder + // sp[1 * kSystemPointerSize]: kIsolate + // sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue) + // sp[3 * kSystemPointerSize]: undefined (kReturnValue) + // sp[4 * kSystemPointerSize]: kData + // sp[5 * kSystemPointerSize]: undefined (kNewTarget) + + // Set up the base register for addressing through MemOperands. It will point + // at the receiver (located at sp + argc * kSystemPointerSize). + __ CalcScaledAddress(base, sp, argc, kSystemPointerSizeLog2); + + // Reserve space on the stack. + __ Sub64(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize)); + + // kHolder. + __ Sd(holder, MemOperand(sp, 0 * kSystemPointerSize)); + + // kIsolate. + __ li(scratch, ExternalReference::isolate_address(masm->isolate())); + __ Sd(scratch, MemOperand(sp, 1 * kSystemPointerSize)); + + // kReturnValueDefaultValue and kReturnValue. 
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue); + __ Sd(scratch, MemOperand(sp, 2 * kSystemPointerSize)); + __ Sd(scratch, MemOperand(sp, 3 * kSystemPointerSize)); + + // kData. + __ Sd(call_data, MemOperand(sp, 4 * kSystemPointerSize)); + + // kNewTarget. + __ Sd(scratch, MemOperand(sp, 5 * kSystemPointerSize)); + + // Keep a pointer to kHolder (= implicit_args) in a scratch register. + // We use it below to set up the FunctionCallbackInfo object. + __ Move(scratch, sp); + + // Allocate the v8::Arguments structure in the arguments' space since + // it's not controlled by GC. + static constexpr int kApiStackSpace = 4; + static constexpr bool kDontSaveDoubles = false; + FrameScope frame_scope(masm, StackFrame::MANUAL); + __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); + + // EnterExitFrame may align the sp. + + // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). + // Arguments are after the return address (pushed by EnterExitFrame()). + __ Sd(scratch, MemOperand(sp, 1 * kSystemPointerSize)); + + // FunctionCallbackInfo::values_ (points at the first varargs argument passed + // on the stack). + __ Add64(scratch, scratch, + Operand((FCA::kArgsLength + 1) * kSystemPointerSize)); + __ Sd(scratch, MemOperand(sp, 2 * kSystemPointerSize)); + + // FunctionCallbackInfo::length_. + // Stored as int field, 32-bit integers within struct on stack always left + // justified by n64 ABI. + __ Sw(argc, MemOperand(sp, 3 * kSystemPointerSize)); + + // We also store the number of bytes to drop from the stack after returning + // from the API function here. + // Note: Unlike on other architectures, this stores the number of slots to + // drop, not the number of bytes. + __ Add64(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */)); + __ Sd(scratch, MemOperand(sp, 4 * kSystemPointerSize)); + + // v8::InvocationCallback's argument. + DCHECK(!AreAliased(api_function_address, scratch, a0)); + __ Add64(a0, sp, Operand(1 * kSystemPointerSize)); + + ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); + + // There are two stack slots above the arguments we constructed on the stack. + // TODO(jgruber): Document what these arguments are. + static constexpr int kStackSlotsAboveFCA = 2; + MemOperand return_value_operand( + fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize); + + static constexpr int kUseStackSpaceOperand = 0; + MemOperand stack_space_operand(sp, 4 * kSystemPointerSize); + + AllowExternalCallThatCantCauseGC scope(masm); + CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, + kUseStackSpaceOperand, &stack_space_operand, + return_value_operand); +} + +void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { + // Build v8::PropertyCallbackInfo::args_ array on the stack and push property + // name below the exit frame to make GC aware of them. 
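+  // Note: after the stores below the stack holds, from sp upwards, the name
+  // handle followed by the PropertyCallbackArguments slots in index order
+  // (should_throw_on_error, holder, isolate, return value default, return
+  // value, data, receiver); name is at sp[0] and args_ starts one slot above.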
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0); + STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1); + STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4); + STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5); + STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6); + STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7); + + Register receiver = ApiGetterDescriptor::ReceiverRegister(); + Register holder = ApiGetterDescriptor::HolderRegister(); + Register callback = ApiGetterDescriptor::CallbackRegister(); + Register scratch = a4; + DCHECK(!AreAliased(receiver, holder, callback, scratch)); + + Register api_function_address = a2; + + // Here and below +1 is for name() pushed after the args_ array. + using PCA = PropertyCallbackArguments; + __ Sub64(sp, sp, (PCA::kArgsLength + 1) * kSystemPointerSize); + __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kSystemPointerSize)); + __ LoadAnyTaggedField(scratch, + FieldMemOperand(callback, AccessorInfo::kDataOffset)); + __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kSystemPointerSize)); + __ LoadRoot(scratch, RootIndex::kUndefinedValue); + __ Sd(scratch, + MemOperand(sp, (PCA::kReturnValueOffset + 1) * kSystemPointerSize)); + __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) * + kSystemPointerSize)); + __ li(scratch, ExternalReference::isolate_address(masm->isolate())); + __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kSystemPointerSize)); + __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kSystemPointerSize)); + // should_throw_on_error -> false + DCHECK_EQ(0, Smi::zero().ptr()); + __ Sd(zero_reg, MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * + kSystemPointerSize)); + __ LoadTaggedPointerField( + scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset)); + __ Sd(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + + // v8::PropertyCallbackInfo::args_ array and name handle. + const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; + + // Load address of v8::PropertyAccessorInfo::args_ array and name handle. + __ Move(a0, sp); // a0 = Handle + __ Add64(a1, a0, Operand(1 * kSystemPointerSize)); // a1 = v8::PCI::args_ + + const int kApiStackSpace = 1; + FrameScope frame_scope(masm, StackFrame::MANUAL); + __ EnterExitFrame(false, kApiStackSpace); + + // Create v8::PropertyCallbackInfo object on the stack and initialize + // it's args_ field. + __ Sd(a1, MemOperand(sp, 1 * kSystemPointerSize)); + __ Add64(a1, sp, Operand(1 * kSystemPointerSize)); + // a1 = v8::PropertyCallbackInfo& + + ExternalReference thunk_ref = + ExternalReference::invoke_accessor_getter_callback(); + + __ LoadTaggedPointerField( + scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset)); + __ Ld(api_function_address, + FieldMemOperand(scratch, Foreign::kForeignAddressOffset)); + + // +3 is to skip prolog, return address and name handle. + MemOperand return_value_operand( + fp, + (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize); + MemOperand* const kUseStackSpaceConstant = nullptr; + CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, + kStackUnwindSpace, kUseStackSpaceConstant, + return_value_operand); +} + +void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { + // The sole purpose of DirectCEntry is for movable callers (e.g. 
any general + // purpose Code object) to be able to call into C functions that may trigger + // GC and thus move the caller. + // + // DirectCEntry places the return address on the stack (updated by the GC), + // making the call GC safe. The irregexp backend relies on this. + + // Make place for arguments to fit C calling convention. Callers use + // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't + // have to do that here. Any caller must drop kCArgsSlotsSize stack space + // after the call. + __ Add64(sp, sp, -kCArgsSlotsSize); + + __ Sd(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address. + __ Call(t6); // Call the C++ function. + __ Ld(t6, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code. + + if (FLAG_debug_code && FLAG_enable_slow_asserts) { + // In case of an error the return address may point to a memory area + // filled with kZapValue by the GC. Dereference the address and check for + // this. + __ Uld(a4, MemOperand(t6)); + __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4, + Operand(reinterpret_cast(kZapValue))); + } + + __ Jump(t6); +} + +namespace { + +// This code tries to be close to ia32 code so that any changes can be +// easily ported. +void Generate_DeoptimizationEntry(MacroAssembler* masm, + DeoptimizeKind deopt_kind) { + Isolate* isolate = masm->isolate(); + + // Unlike on ARM we don't save all the registers, just the useful ones. + // For the rest, there are gaps on the stack, so the offsets remain the same. + const int kNumberOfRegisters = Register::kNumRegisters; + + RegList restored_regs = kJSCallerSaved | kCalleeSaved; + RegList saved_regs = restored_regs | sp | ra; + + const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters; + + // Save all double FPU registers before messing with them. + __ Sub64(sp, sp, Operand(kDoubleRegsSize)); + const RegisterConfiguration* config = RegisterConfiguration::Default(); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister fpu_reg = DoubleRegister::from_code(code); + int offset = code * kDoubleSize; + __ StoreDouble(fpu_reg, MemOperand(sp, offset)); + } + + // Push saved_regs (needed to populate FrameDescription::registers_). + // Leave gaps for other registers. + __ Sub64(sp, sp, kNumberOfRegisters * kSystemPointerSize); + for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { + if ((saved_regs.bits() & (1 << i)) != 0) { + __ Sd(ToRegister(i), MemOperand(sp, kSystemPointerSize * i)); + } + } + + __ li(a2, + ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate)); + __ Sd(fp, MemOperand(a2)); + + const int kSavedRegistersAreaSize = + (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize; + + // Get the address of the location in the code object (a3) (return + // address for lazy deoptimization) and compute the fp-to-sp delta in + // register a4. + __ Move(a2, ra); + __ Add64(a3, sp, Operand(kSavedRegistersAreaSize)); + + __ Sub64(a3, fp, a3); + + // Allocate a new deoptimizer object. + __ PrepareCallCFunction(5, a4); + // Pass five arguments, according to n64 ABI. 
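+  // Sketched as a C call (parameter names are illustrative; the actual
+  // signature lives in Deoptimizer::New):
+  //   New(function_or_zero /* a0 */, deopt_kind /* a1 */,
+  //       return_address /* a2, taken from ra */,
+  //       fp_to_sp_delta /* a3 */, isolate /* a4 */);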
+ __ Move(a0, zero_reg); + Label context_check; + __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ JumpIfSmi(a1, &context_check); + __ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + __ bind(&context_check); + __ li(a1, Operand(static_cast(deopt_kind))); + // a2: code object address + // a3: fp-to-sp delta + __ li(a4, ExternalReference::isolate_address(isolate)); + + // Call Deoptimizer::New(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5); + } + + // Preserve "deoptimizer" object in register a0 and get the input + // frame descriptor pointer to a1 (deoptimizer->input_); + __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset())); + + // Copy core registers into FrameDescription::registers_[kNumRegisters]. + DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); + for (int i = 0; i < kNumberOfRegisters; i++) { + int offset = + (i * kSystemPointerSize) + FrameDescription::registers_offset(); + if ((saved_regs.bits() & (1 << i)) != 0) { + __ Ld(a2, MemOperand(sp, i * kSystemPointerSize)); + __ Sd(a2, MemOperand(a1, offset)); + } else if (FLAG_debug_code) { + __ li(a2, kDebugZapValue); + __ Sd(a2, MemOperand(a1, offset)); + } + } + + int double_regs_offset = FrameDescription::double_registers_offset(); + // Copy FPU registers to + // double_registers_[DoubleRegister::kNumAllocatableRegisters] + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + int dst_offset = code * kDoubleSize + double_regs_offset; + int src_offset = + code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize; + __ LoadDouble(ft0, MemOperand(sp, src_offset)); + __ StoreDouble(ft0, MemOperand(a1, dst_offset)); + } + + // Remove the saved registers from the stack. + __ Add64(sp, sp, Operand(kSavedRegistersAreaSize)); + + // Compute a pointer to the unwinding limit in register a2; that is + // the first stack slot not part of the input frame. + __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset())); + __ Add64(a2, a2, sp); + + // Unwind the stack down to - but not including - the unwinding + // limit and copy the contents of the activation frame to the input + // frame description. + __ Add64(a3, a1, Operand(FrameDescription::frame_content_offset())); + Label pop_loop; + Label pop_loop_header; + __ BranchShort(&pop_loop_header); + __ bind(&pop_loop); + __ pop(a4); + __ Sd(a4, MemOperand(a3, 0)); + __ Add64(a3, a3, sizeof(uint64_t)); + __ bind(&pop_loop_header); + __ Branch(&pop_loop, ne, a2, Operand(sp), Label::Distance::kNear); + // Compute the output frame in the deoptimizer. + __ push(a0); // Preserve deoptimizer object across call. + // a0: deoptimizer object; a1: scratch. + __ PrepareCallCFunction(1, a1); + // Call Deoptimizer::ComputeOutputFrames(). + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); + } + __ pop(a0); // Restore deoptimizer object (class Deoptimizer). + + __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset())); + + // Replace the current (input) frame with the output frames. + Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; + // Outer loop state: a4 = current "FrameDescription** output_", + // a1 = one past the last FrameDescription**. + __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset())); + __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_. 
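+  // The nested loops below are roughly equivalent to (illustrative pseudo-C,
+  // using the offsets referenced in the assembly):
+  //   for (FrameDescription** f = output_; f != output_ + output_count_; f++)
+  //     for (uint64_t off = (*f)->frame_size(); off > 0; off -= 8)
+  //       push((*f)->frame_content(off - 8));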
+ __ CalcScaledAddress(a1, a4, a1, kSystemPointerSizeLog2); + __ BranchShort(&outer_loop_header); + __ bind(&outer_push_loop); + // Inner loop state: a2 = current FrameDescription*, a3 = loop index. + __ Ld(a2, MemOperand(a4, 0)); // output_[ix] + __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset())); + __ BranchShort(&inner_loop_header); + __ bind(&inner_push_loop); + __ Sub64(a3, a3, Operand(sizeof(uint64_t))); + __ Add64(a6, a2, Operand(a3)); + __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset())); + __ push(a7); + __ bind(&inner_loop_header); + __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg)); + + __ Add64(a4, a4, Operand(kSystemPointerSize)); + __ bind(&outer_loop_header); + __ Branch(&outer_push_loop, lt, a4, Operand(a1)); + + __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset())); + for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { + int code = config->GetAllocatableDoubleCode(i); + const DoubleRegister fpu_reg = DoubleRegister::from_code(code); + int src_offset = code * kDoubleSize + double_regs_offset; + __ LoadDouble(fpu_reg, MemOperand(a1, src_offset)); + } + + // Push pc and continuation from the last output frame. + __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset())); + __ push(a6); + __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset())); + __ push(a6); + + // Technically restoring 't3' should work unless zero_reg is also restored + // but it's safer to check for this. + DCHECK(!(restored_regs.has(t3))); + // Restore the registers from the last output frame. + __ Move(t3, a2); + for (int i = kNumberOfRegisters - 1; i >= 0; i--) { + int offset = + (i * kSystemPointerSize) + FrameDescription::registers_offset(); + if ((restored_regs.bits() & (1 << i)) != 0) { + __ Ld(ToRegister(i), MemOperand(t3, offset)); + } + } + + __ pop(t6); // Get continuation, leave pc on stack. + __ pop(ra); + __ Jump(t6); + __ stop(); +} + +} // namespace + +void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) { + Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager); +} + +void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { + Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy); +} + +void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) { + Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused); +} + +namespace { + +// Restarts execution either at the current or next (in execution order) +// bytecode. If there is baseline code on the shared function info, converts an +// interpreter frame into a baseline frame and continues execution in baseline +// code. Otherwise execution continues with bytecode. +void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, + bool next_bytecode, + bool is_osr = false) { + Label start; + __ bind(&start); + + // Get function from the frame. + Register closure = a1; + __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + + // Get the Code object from the shared function info. + Register code_obj = s1; + __ LoadTaggedPointerField( + code_obj, + FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedPointerField( + code_obj, + FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + + // Check if we have baseline code. For OSR entry it is safe to assume we + // always have baseline code. 
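+  // The check below inspects the instance type of the function data: if it is
+  // not a CodeT object (i.e. no baseline code has been compiled yet), control
+  // simply re-enters the interpreter through the builtin chosen below.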
+ if (!is_osr) { + Label start_with_baseline; + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ GetObjectType(code_obj, scratch, scratch); + __ Branch(&start_with_baseline, eq, scratch, Operand(CODET_TYPE)); + + // Start with bytecode as there is no baseline code. + Builtin builtin_id = next_bytecode + ? Builtin::kInterpreterEnterAtNextBytecode + : Builtin::kInterpreterEnterAtBytecode; + __ Jump(masm->isolate()->builtins()->code_handle(builtin_id), + RelocInfo::CODE_TARGET); + + // Start with baseline code. + __ bind(&start_with_baseline); + } else if (FLAG_debug_code) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ GetObjectType(code_obj, scratch, scratch); + __ Assert(eq, AbortReason::kExpectedBaselineData, scratch, + Operand(CODET_TYPE)); + } + if (FLAG_debug_code) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + AssertCodeIsBaseline(masm, code_obj, scratch); + } + // Replace BytecodeOffset with the feedback vector. + Register feedback_vector = a2; + __ LoadTaggedPointerField( + feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedPointerField( + feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + Label install_baseline_code; + // Check if feedback vector is valid. If not, call prepare for baseline to + // allocate it. + UseScratchRegisterScope temps(masm); + Register type = temps.Acquire(); + __ GetObjectType(feedback_vector, type, type); + __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE)); + // Save BytecodeOffset from the stack frame. + __ SmiUntag(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + // Replace BytecodeOffset with the feedback vector. + __ Sd(feedback_vector, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + feedback_vector = no_reg; + + // Compute baseline pc for bytecode offset. + ExternalReference get_baseline_pc_extref; + if (next_bytecode || is_osr) { + get_baseline_pc_extref = + ExternalReference::baseline_pc_for_next_executed_bytecode(); + } else { + get_baseline_pc_extref = + ExternalReference::baseline_pc_for_bytecode_offset(); + } + + Register get_baseline_pc = a3; + __ li(get_baseline_pc, get_baseline_pc_extref); + + // If the code deoptimizes during the implicit function entry stack interrupt + // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is + // not a valid bytecode offset. + // TODO(pthier): Investigate if it is feasible to handle this special case + // in TurboFan instead of here. + Label valid_bytecode_offset, function_entry_bytecode; + if (!is_osr) { + __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + + kFunctionEntryBytecodeOffset)); + } + + __ Sub64(kInterpreterBytecodeOffsetRegister, + kInterpreterBytecodeOffsetRegister, + (BytecodeArray::kHeaderSize - kHeapObjectTag)); + + __ bind(&valid_bytecode_offset); + // Get bytecode array from the stack frame. 
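+  // The bytecode array, the current bytecode offset and the Code object are
+  // then passed to the C helper selected above, which returns the matching
+  // baseline PC offset; the accumulator is preserved across that call.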
+ __ Ld(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ Push(kInterpreterAccumulatorRegister); + { + Register arg_reg_1 = a0; + Register arg_reg_2 = a1; + Register arg_reg_3 = a2; + __ Move(arg_reg_1, code_obj); + __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister); + __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister); + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallCFunction(get_baseline_pc, 3, 0); + } + __ Add64(code_obj, code_obj, kReturnRegister0); + __ Pop(kInterpreterAccumulatorRegister); + + if (is_osr) { + // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm + // Sparkplug here. + __ Ld(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister); + Generate_OSREntry(masm, code_obj, + Operand(Code::kHeaderSize - kHeapObjectTag)); + } else { + __ Add64(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag); + __ Jump(code_obj); + } + __ Trap(); // Unreachable. + + if (!is_osr) { + __ bind(&function_entry_bytecode); + // If the bytecode offset is kFunctionEntryOffset, get the start address of + // the first bytecode. + __ li(kInterpreterBytecodeOffsetRegister, Operand(int64_t(0))); + if (next_bytecode) { + __ li(get_baseline_pc, + ExternalReference::baseline_pc_for_bytecode_offset()); + } + __ Branch(&valid_bytecode_offset); + } + + __ bind(&install_baseline_code); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(kInterpreterAccumulatorRegister); + __ Push(closure); + __ CallRuntime(Runtime::kInstallBaselineCode, 1); + __ Pop(kInterpreterAccumulatorRegister); + } + // Retry from the start after installing baseline code. + __ Branch(&start); +} + +} // namespace + +void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, false); +} + +void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, true); +} + +void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, false, true); +} + +#undef __ + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_RISCV64 diff --git a/deps/v8/src/codegen/mips/assembler-mips-inl.h b/deps/v8/src/codegen/mips/assembler-mips-inl.h new file mode 100644 index 00000000000000..fceb46b78e6f51 --- /dev/null +++ b/deps/v8/src/codegen/mips/assembler-mips-inl.h @@ -0,0 +1,353 @@ + +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2012 the V8 project authors. All rights reserved. + +#ifndef V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_ +#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_ + +#include "src/codegen/mips/assembler-mips.h" + +#include "src/codegen/assembler.h" +#include "src/debug/debug.h" +#include "src/objects/objects-inl.h" + +namespace v8 { +namespace internal { + +bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); } + +// ----------------------------------------------------------------------------- +// Operand and MemOperand. + +bool Operand::is_reg() const { return rm_.is_valid(); } + +int32_t Operand::immediate() const { + DCHECK(!is_reg()); + DCHECK(!IsHeapObjectRequest()); + return value_.immediate; +} + +// ----------------------------------------------------------------------------- +// RelocInfo. + +void RelocInfo::apply(intptr_t delta) { + if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) { + // Absolute code pointer inside code object moves with the code object. + Assembler::RelocateInternalReference(rmode_, pc_, delta); + } else if (IsRelativeCodeTarget(rmode_)) { + Assembler::RelocateRelativeReference(rmode_, pc_, delta); + } +} + +Address RelocInfo::target_address() { + DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) || + IsWasmCall(rmode_)); + return Assembler::target_address_at(pc_, constant_pool_); +} + +Address RelocInfo::target_address_address() { + DCHECK(HasTargetAddressAddress()); + // Read the address of the word containing the target_address in an + // instruction stream. + // The only architecture-independent user of this function is the serializer. + // The serializer uses it to find out how many raw bytes of instruction to + // output before the next target. + // For an instruction like LUI/ORI where the target bits are mixed into the + // instruction bits, the size of the target will be zero, indicating that the + // serializer should not step forward in memory after a target is resolved + // and written. In this case the target_address_address function should + // return the end of the instructions to be patched, allowing the + // deserializer to deserialize the instructions as raw bytes and put them in + // place, ready to be patched with the target. After jump optimization, + // that is the address of the instruction that follows J/JAL/JR/JALR + // instruction. + if (IsMipsArchVariant(kMips32r6)) { + // On R6 we don't move to the end of the instructions to be patched, but one + // instruction before, because if these instructions are at the end of the + // code object it can cause errors in the deserializer. 
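+    // For illustration, assuming kInstructionsFor32BitConstant == 2 and
+    // kInstrSize == 4: the R6 path below returns pc_ + 4, while the non-R6
+    // path returns pc_ + 8.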
+    return pc_ + (Assembler::kInstructionsFor32BitConstant - 1) * kInstrSize;
+  } else {
+    return pc_ + Assembler::kInstructionsFor32BitConstant * kInstrSize;
+  }
+}
+
+Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
+
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+
+void Assembler::deserialization_set_special_target_at(
+    Address instruction_payload, Code code, Address target) {
+  set_target_address_at(instruction_payload,
+                        !code.is_null() ? code.constant_pool() : kNullAddress,
+                        target);
+}
+
+int Assembler::deserialization_special_target_size(
+    Address instruction_payload) {
+  return kSpecialTargetSize;
+}
+
+void Assembler::set_target_internal_reference_encoded_at(Address pc,
+                                                         Address target) {
+  Instr instr1 = Assembler::instr_at(pc + 0 * kInstrSize);
+  Instr instr2 = Assembler::instr_at(pc + 1 * kInstrSize);
+  DCHECK(Assembler::IsLui(instr1));
+  DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
+  instr1 &= ~kImm16Mask;
+  instr2 &= ~kImm16Mask;
+  int32_t imm = static_cast<int32_t>(target);
+  DCHECK_EQ(imm & 3, 0);
+  if (Assembler::IsJicOrJialc(instr2)) {
+    // Encoded internal references are lui/jic load of 32-bit absolute address.
+    uint32_t lui_offset_u, jic_offset_u;
+    Assembler::UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u);
+
+    Assembler::instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
+    Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
+  } else {
+    // Encoded internal references are lui/ori load of 32-bit absolute address.
+    PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2,
+                         1 * kInstrSize);
+  }
+
+  // Currently used only by deserializer, and all code will be flushed
+  // after complete deserialization, no need to flush on each reference.
+}
+
+void Assembler::deserialization_set_target_internal_reference_at(
+    Address pc, Address target, RelocInfo::Mode mode) {
+  if (RelocInfo::IsInternalReferenceEncoded(mode)) {
+    DCHECK(IsLui(instr_at(pc)));
+    set_target_internal_reference_encoded_at(pc, target);
+  } else {
+    DCHECK(RelocInfo::IsInternalReference(mode));
+    Memory<Address>(pc) = target;
+  }
+}
+
+HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
+  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
+         IsDataEmbeddedObject(rmode_));
+  if (IsDataEmbeddedObject(rmode_)) {
+    return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
+  }
+  return HeapObject::cast(
+      Object(Assembler::target_address_at(pc_, constant_pool_)));
+}
+
+Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
+  if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
+    return Handle<HeapObject>(reinterpret_cast<Address*>(
+        Assembler::target_address_at(pc_, constant_pool_)));
+  } else if (IsDataEmbeddedObject(rmode_)) {
+    return Handle<HeapObject>::cast(
+        ReadUnalignedValue<Handle<HeapObject>>(pc_));
+  }
+  DCHECK(IsRelativeCodeTarget(rmode_));
+  return origin->relative_code_target_object_handle_at(pc_);
+}
+
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
+                                  WriteBarrierMode write_barrier_mode,
+                                  ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
+         IsDataEmbeddedObject(rmode_));
+  if (IsDataEmbeddedObject(rmode_)) {
+    WriteUnalignedValue(pc_, target.ptr());
+    // No need to flush icache since no instructions were changed.
+  } else {
+    Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
+                                     icache_flush_mode);
+  }
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+      !FLAG_disable_write_barriers) {
+    WriteBarrierForCode(host(), this, target);
+  }
+}
+
+Address RelocInfo::target_external_reference() {
+  DCHECK(IsExternalReference(rmode_));
+  return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::set_target_external_reference(
+    Address target, ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsExternalReference(rmode_));
+  Assembler::set_target_address_at(pc_, constant_pool_, target,
+                                   icache_flush_mode);
+}
+
+Address RelocInfo::target_internal_reference() {
+  if (IsInternalReference(rmode_)) {
+    return Memory<Address>(pc_);
+  } else {
+    // Encoded internal references are lui/ori or lui/jic load of 32-bit
+    // absolute address.
+    DCHECK(IsInternalReferenceEncoded(rmode_));
+    Instr instr1 = Assembler::instr_at(pc_ + 0 * kInstrSize);
+    Instr instr2 = Assembler::instr_at(pc_ + 1 * kInstrSize);
+    DCHECK(Assembler::IsLui(instr1));
+    DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
+    if (Assembler::IsJicOrJialc(instr2)) {
+      return static_cast<Address>(
+          Assembler::CreateTargetAddress(instr1, instr2));
+    }
+    return static_cast<Address>(Assembler::GetLuiOriImmediate(instr1, instr2));
+  }
+}
+
+Address RelocInfo::target_internal_reference_address() {
+  DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+  return pc_;
+}
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+  DCHECK(IsRuntimeEntry(rmode_));
+  return target_address();
+}
+
+void RelocInfo::set_target_runtime_entry(Address target,
+                                         WriteBarrierMode write_barrier_mode,
+                                         ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsRuntimeEntry(rmode_));
+  if (target_address() != target)
+    set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+Address RelocInfo::target_off_heap_target() {
+  DCHECK(IsOffHeapTarget(rmode_));
+  return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::WipeOut() {
+  DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+         IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+         IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
+         IsOffHeapTarget(rmode_));
+  if (IsInternalReference(rmode_)) {
+    Memory<Address>(pc_) = kNullAddress;
+  } else if (IsInternalReferenceEncoded(rmode_)) {
+    Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress);
+  } else {
+    Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
+  }
+}
+
+Handle<Code> Assembler::relative_code_target_object_handle_at(
+    Address pc) const {
+  Instr instr1 = instr_at(pc);
+  Instr instr2 = instr_at(pc + kInstrSize);
+  DCHECK(IsLui(instr1));
+  DCHECK(IsOri(instr2) || IsNal(instr2));
+  DCHECK(IsNal(instr2) || IsNal(instr_at(pc - kInstrSize)));
+  if (IsNal(instr2)) {
+    instr2 = instr_at(pc + 2 * kInstrSize);
+  }
+  // Interpret 2 instructions generated by li (lui/ori).
+  int code_target_index = GetLuiOriImmediate(instr1, instr2);
+  return GetCodeTarget(code_target_index);
+}
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+void Assembler::CheckBuffer() {
+  if (buffer_space() <= kGap) {
+    GrowBuffer();
+  }
+}
+
+void Assembler::CheckForEmitInForbiddenSlot() {
+  if (!is_buffer_growth_blocked()) {
+    CheckBuffer();
+  }
+  if (IsPrevInstrCompactBranch()) {
+    // Nop instruction to precede a CTI in forbidden slot:
+    Instr nop = SPECIAL | SLL;
+    *reinterpret_cast<Instr*>(pc_) = nop;
+    pc_ += kInstrSize;
+
+    ClearCompactBranchState();
+  }
+}
+
+void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
+  if (IsPrevInstrCompactBranch()) {
+    if (Instruction::IsForbiddenAfterBranchInstr(x)) {
+      // Nop instruction to precede a CTI in forbidden slot:
+      Instr nop = SPECIAL | SLL;
+      *reinterpret_cast<Instr*>(pc_) = nop;
+      pc_ += kInstrSize;
+    }
+    ClearCompactBranchState();
+  }
+  *reinterpret_cast<Instr*>(pc_) = x;
+  pc_ += kInstrSize;
+  if (is_compact_branch == CompactBranchType::COMPACT_BRANCH) {
+    EmittedCompactBranchInstruction();
+  }
+  CheckTrampolinePoolQuick();
+}
+
+template <>
+inline void Assembler::EmitHelper(uint8_t x);
+
+template <typename T>
+void Assembler::EmitHelper(T x) {
+  *reinterpret_cast<T*>(pc_) = x;
+  pc_ += sizeof(x);
+  CheckTrampolinePoolQuick();
+}
+
+template <>
+void Assembler::EmitHelper(uint8_t x) {
+  *reinterpret_cast<uint8_t*>(pc_) = x;
+  pc_ += sizeof(x);
+  if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
+    CheckTrampolinePoolQuick();
+  }
+}
+
+void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
+  if (!is_buffer_growth_blocked()) {
+    CheckBuffer();
+  }
+  EmitHelper(x, is_compact_branch);
+}
+
+EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
new file mode 100644
index 00000000000000..788651e6fc7092
--- /dev/null
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -0,0 +1,3853 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2012 the V8 project authors. All rights reserved. + +#include "src/codegen/mips/assembler-mips.h" + +#if V8_TARGET_ARCH_MIPS + +#include "src/base/bits.h" +#include "src/base/cpu.h" +#include "src/codegen/mips/assembler-mips-inl.h" +#include "src/codegen/safepoint-table.h" +#include "src/codegen/string-constants.h" +#include "src/deoptimizer/deoptimizer.h" +#include "src/objects/heap-number-inl.h" + +namespace v8 { +namespace internal { + +// Get the CPU features enabled by the build. For cross compilation the +// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS +// can be defined to enable FPU instructions when building the +// snapshot. +static unsigned CpuFeaturesImpliedByCompiler() { + unsigned answer = 0; +#ifdef CAN_USE_FPU_INSTRUCTIONS + answer |= 1u << FPU; +#endif // def CAN_USE_FPU_INSTRUCTIONS + + // If the compiler is allowed to use FPU then we can use FPU too in our code + // generation even when generating snapshots. This won't work for cross + // compilation. +#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0 + answer |= 1u << FPU; +#endif + + return answer; +} + +bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); } + +void CpuFeatures::ProbeImpl(bool cross_compile) { + supported_ |= CpuFeaturesImpliedByCompiler(); + + // Only use statically determined features for cross compile (snapshot). + if (cross_compile) return; + + // If the compiler is allowed to use fpu then we can use fpu too in our + // code generation. +#ifndef __mips__ + // For the simulator build, use FPU. + supported_ |= 1u << FPU; +#if defined(_MIPS_ARCH_MIPS32R6) + // FP64 mode is implied on r6. + supported_ |= 1u << FP64FPU; +#if defined(_MIPS_MSA) + supported_ |= 1u << MIPS_SIMD; +#endif +#endif +#if defined(FPU_MODE_FP64) + supported_ |= 1u << FP64FPU; +#endif +#else + // Probe for additional features at runtime. 
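+  // Runtime probing is delegated to base::CPU below; the nested #if blocks
+  // only decide which of the probed bits (FPU, FP64, MSA) are relevant for
+  // the configured build mode.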
+ base::CPU cpu; + if (cpu.has_fpu()) supported_ |= 1u << FPU; +#if defined(FPU_MODE_FPXX) + if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU; +#elif defined(FPU_MODE_FP64) + supported_ |= 1u << FP64FPU; +#if defined(_MIPS_ARCH_MIPS32R6) +#if defined(_MIPS_MSA) + supported_ |= 1u << MIPS_SIMD; +#else + if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD; +#endif +#endif +#endif +#if defined(_MIPS_ARCH_MIPS32RX) + if (cpu.architecture() == 6) { + supported_ |= 1u << MIPSr6; + } else if (cpu.architecture() == 2) { + supported_ |= 1u << MIPSr1; + supported_ |= 1u << MIPSr2; + } else { + supported_ |= 1u << MIPSr1; + } +#endif +#endif + + // Set a static value on whether Simd is supported. + // This variable is only used for certain archs to query SupportWasmSimd128() + // at runtime in builtins using an extern ref. Other callers should use + // CpuFeatures::SupportWasmSimd128(). + CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128(); +} + +void CpuFeatures::PrintTarget() {} +void CpuFeatures::PrintFeatures() {} + +int ToNumber(Register reg) { + DCHECK(reg.is_valid()); + const int kNumbers[] = { + 0, // zero_reg + 1, // at + 2, // v0 + 3, // v1 + 4, // a0 + 5, // a1 + 6, // a2 + 7, // a3 + 8, // t0 + 9, // t1 + 10, // t2 + 11, // t3 + 12, // t4 + 13, // t5 + 14, // t6 + 15, // t7 + 16, // s0 + 17, // s1 + 18, // s2 + 19, // s3 + 20, // s4 + 21, // s5 + 22, // s6 + 23, // s7 + 24, // t8 + 25, // t9 + 26, // k0 + 27, // k1 + 28, // gp + 29, // sp + 30, // fp + 31, // ra + }; + return kNumbers[reg.code()]; +} + +Register ToRegister(int num) { + DCHECK(num >= 0 && num < kNumRegisters); + const Register kRegisters[] = { + zero_reg, at, v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, t7, + s0, s1, s2, s3, s4, s5, s6, s7, t8, t9, k0, k1, gp, sp, fp, ra}; + return kRegisters[num]; +} + +// ----------------------------------------------------------------------------- +// Implementation of RelocInfo. + +const int RelocInfo::kApplyMask = + RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) | + RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) | + RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET); + +bool RelocInfo::IsCodedSpecially() { + // The deserializer needs to know whether a pointer is specially coded. Being + // specially coded on MIPS means that it is a lui/ori instruction, and that is + // always the case inside code objects. + return true; +} + +bool RelocInfo::IsInConstantPool() { return false; } + +uint32_t RelocInfo::wasm_call_tag() const { + DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL); + return static_cast( + Assembler::target_address_at(pc_, constant_pool_)); +} + +// ----------------------------------------------------------------------------- +// Implementation of Operand and MemOperand. +// See assembler-mips-inl.h for inlined constructors. 
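+// An Operand wraps either a register or a (possibly relocatable) immediate,
+// while a MemOperand is a base register plus a signed 32-bit offset. For
+// example (illustrative only):
+//   __ lw(t0, MemOperand(sp, 4));    // load the word at sp + 4
+//   __ li(t1, Operand(0x12345678));  // 32-bit immediate, typically lui/ori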
+
+Operand::Operand(Handle<HeapObject> handle)
+    : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
+  value_.immediate = static_cast<intptr_t>(handle.address());
+}
+
+Operand Operand::EmbeddedNumber(double value) {
+  int32_t smi;
+  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
+  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
+  result.is_heap_object_request_ = true;
+  result.value_.heap_object_request = HeapObjectRequest(value);
+  return result;
+}
+
+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
+  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
+  result.is_heap_object_request_ = true;
+  result.value_.heap_object_request = HeapObjectRequest(str);
+  return result;
+}
+
+MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
+  offset_ = offset;
+}
+
+MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
+                       OffsetAddend offset_addend)
+    : Operand(rm) {
+  offset_ = unit * multiplier + offset_addend;
+}
+
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
+  for (auto& request : heap_object_requests_) {
+    Handle<HeapObject> object;
+    switch (request.kind()) {
+      case HeapObjectRequest::kHeapNumber:
+        object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+            request.heap_number());
+        break;
+      case HeapObjectRequest::kStringConstant:
+        const StringConstantBase* str = request.string();
+        CHECK_NOT_NULL(str);
+        object = str->AllocateStringConstant(isolate);
+        break;
+    }
+    Address pc = reinterpret_cast<Address>
(buffer_start_) + request.offset(); + set_target_value_at(pc, reinterpret_cast(object.location())); + } +} + +// ----------------------------------------------------------------------------- +// Specific instructions, constants, and masks. + +static const int kNegOffset = 0x00008000; +// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r) +// operations as post-increment of sp. +const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift) | + (sp.code() << kRtShift) | + (kPointerSize & kImm16Mask); +// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp. +const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift) | + (sp.code() << kRtShift) | + (-kPointerSize & kImm16Mask); +// sw(r, MemOperand(sp, 0)) +const Instr kPushRegPattern = SW | (sp.code() << kRsShift) | (0 & kImm16Mask); +// lw(r, MemOperand(sp, 0)) +const Instr kPopRegPattern = LW | (sp.code() << kRsShift) | (0 & kImm16Mask); + +const Instr kLwRegFpOffsetPattern = + LW | (fp.code() << kRsShift) | (0 & kImm16Mask); + +const Instr kSwRegFpOffsetPattern = + SW | (fp.code() << kRsShift) | (0 & kImm16Mask); + +const Instr kLwRegFpNegOffsetPattern = + LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); + +const Instr kSwRegFpNegOffsetPattern = + SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); +// A mask for the Rt register for push, pop, lw, sw instructions. +const Instr kRtMask = kRtFieldMask; +const Instr kLwSwInstrTypeMask = 0xFFE00000; +const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask; +const Instr kLwSwOffsetMask = kImm16Mask; + +Assembler::Assembler(const AssemblerOptions& options, + std::unique_ptr buffer) + : AssemblerBase(options, std::move(buffer)), scratch_register_list_({at}) { + reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); + + last_trampoline_pool_end_ = 0; + no_trampoline_pool_before_ = 0; + trampoline_pool_blocked_nesting_ = 0; + // We leave space (16 * kTrampolineSlotsSize) + // for BlockTrampolinePoolScope buffer. + next_buffer_check_ = FLAG_force_long_branches + ? kMaxInt + : kMaxBranchOffset - kTrampolineSlotsSize * 16; + internal_trampoline_exception_ = false; + last_bound_pos_ = 0; + + trampoline_emitted_ = FLAG_force_long_branches; + unbound_labels_count_ = 0; + block_buffer_growth_ = false; +} + +void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, + SafepointTableBuilder* safepoint_table_builder, + int handler_table_offset) { + // As a crutch to avoid having to add manual Align calls wherever we use a + // raw workflow to create Code objects (mostly in tests), add another Align + // call here. It does no harm - the end of the Code object is aligned to the + // (larger) kCodeAlignment anyways. + // TODO(jgruber): Consider moving responsibility for proper alignment to + // metadata table builders (safepoint, handler, constant pool, code + // comments). + DataAlign(Code::kMetadataAlignment); + + EmitForbiddenSlotInstruction(); + + int code_comments_size = WriteCodeComments(); + + DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. + + AllocateAndInstallRequestedHeapObjects(isolate); + + // Set up code descriptor. + // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to + // this point to make CodeDesc initialization less fiddly. 
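+  // The metadata sections are laid out back to front from the end of the
+  // generated instructions, roughly:
+  //   [ instructions | safepoint table | handler table | const pool | code comments ]
+  // so each offset below is obtained by subtracting a section size from the
+  // offset of the section that follows it.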
+ + static constexpr int kConstantPoolSize = 0; + const int instruction_size = pc_offset(); + const int code_comments_offset = instruction_size - code_comments_size; + const int constant_pool_offset = code_comments_offset - kConstantPoolSize; + const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable) + ? constant_pool_offset + : handler_table_offset; + const int safepoint_table_offset = + (safepoint_table_builder == kNoSafepointTable) + ? handler_table_offset2 + : safepoint_table_builder->safepoint_table_offset(); + const int reloc_info_offset = + static_cast(reloc_info_writer.pos() - buffer_->start()); + CodeDesc::Initialize(desc, this, safepoint_table_offset, + handler_table_offset2, constant_pool_offset, + code_comments_offset, reloc_info_offset); +} + +void Assembler::Align(int m) { + DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m)); + EmitForbiddenSlotInstruction(); + while ((pc_offset() & (m - 1)) != 0) { + nop(); + } +} + +void Assembler::CodeTargetAlign() { + // No advantage to aligning branch/call targets to more than + // single instruction, that I am aware of. + Align(4); +} + +Register Assembler::GetRtReg(Instr instr) { + return Register::from_code((instr & kRtFieldMask) >> kRtShift); +} + +Register Assembler::GetRsReg(Instr instr) { + return Register::from_code((instr & kRsFieldMask) >> kRsShift); +} + +Register Assembler::GetRdReg(Instr instr) { + return Register::from_code((instr & kRdFieldMask) >> kRdShift); +} + +uint32_t Assembler::GetRt(Instr instr) { + return (instr & kRtFieldMask) >> kRtShift; +} + +uint32_t Assembler::GetRtField(Instr instr) { return instr & kRtFieldMask; } + +uint32_t Assembler::GetRs(Instr instr) { + return (instr & kRsFieldMask) >> kRsShift; +} + +uint32_t Assembler::GetRsField(Instr instr) { return instr & kRsFieldMask; } + +uint32_t Assembler::GetRd(Instr instr) { + return (instr & kRdFieldMask) >> kRdShift; +} + +uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; } + +uint32_t Assembler::GetSa(Instr instr) { + return (instr & kSaFieldMask) >> kSaShift; +} + +uint32_t Assembler::GetSaField(Instr instr) { return instr & kSaFieldMask; } + +uint32_t Assembler::GetOpcodeField(Instr instr) { return instr & kOpcodeMask; } + +uint32_t Assembler::GetFunction(Instr instr) { + return (instr & kFunctionFieldMask) >> kFunctionShift; +} + +uint32_t Assembler::GetFunctionField(Instr instr) { + return instr & kFunctionFieldMask; +} + +uint32_t Assembler::GetImmediate16(Instr instr) { return instr & kImm16Mask; } + +uint32_t Assembler::GetLabelConst(Instr instr) { return instr & ~kImm16Mask; } + +bool Assembler::IsPop(Instr instr) { + return (instr & ~kRtMask) == kPopRegPattern; +} + +bool Assembler::IsPush(Instr instr) { + return (instr & ~kRtMask) == kPushRegPattern; +} + +bool Assembler::IsSwRegFpOffset(Instr instr) { + return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern); +} + +bool Assembler::IsLwRegFpOffset(Instr instr) { + return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern); +} + +bool Assembler::IsSwRegFpNegOffset(Instr instr) { + return ((instr & (kLwSwInstrTypeMask | kNegOffset)) == + kSwRegFpNegOffsetPattern); +} + +bool Assembler::IsLwRegFpNegOffset(Instr instr) { + return ((instr & (kLwSwInstrTypeMask | kNegOffset)) == + kLwRegFpNegOffsetPattern); +} + +// Labels refer to positions in the (to be) generated code. +// There are bound, linked, and unused labels. +// +// Bound labels refer to known positions in the already +// generated code. pos() is the position the label refers to. 
+// +// Linked labels refer to unknown positions in the code +// to be generated; pos() is the position of the last +// instruction using the label. + +// The link chain is terminated by a value in the instruction of -1, +// which is an otherwise illegal value (branch -1 is inf loop). +// The instruction 16-bit offset field addresses 32-bit words, but in +// code is conv to an 18-bit value addressing bytes, hence the -4 value. + +const int kEndOfChain = -4; +// Determines the end of the Jump chain (a subset of the label link chain). +const int kEndOfJumpChain = 0; + +bool Assembler::IsMsaBranch(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + uint32_t rs_field = GetRsField(instr); + if (opcode == COP1) { + switch (rs_field) { + case BZ_V: + case BZ_B: + case BZ_H: + case BZ_W: + case BZ_D: + case BNZ_V: + case BNZ_B: + case BNZ_H: + case BNZ_W: + case BNZ_D: + return true; + default: + return false; + } + } else { + return false; + } +} + +bool Assembler::IsBranch(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + uint32_t rt_field = GetRtField(instr); + uint32_t rs_field = GetRsField(instr); + // Checks if the instruction is a branch. + bool isBranch = + opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ || + opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL || + (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ || + rt_field == BLTZAL || rt_field == BGEZAL)) || + (opcode == COP1 && rs_field == BC1) || // Coprocessor branch. + (opcode == COP1 && rs_field == BC1EQZ) || + (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr); + if (!isBranch && IsMipsArchVariant(kMips32r6)) { + // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and + // POP30 (BNVC, BNEC, BNEZALC) are branch ops. + isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC || + opcode == BALC || + (opcode == POP66 && rs_field != 0) || // BEQZC + (opcode == POP76 && rs_field != 0); // BNEZC + } + return isBranch; +} + +bool Assembler::IsBc(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + // Checks if the instruction is a BC or BALC. + return opcode == BC || opcode == BALC; +} + +bool Assembler::IsNal(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + uint32_t rt_field = GetRtField(instr); + uint32_t rs_field = GetRsField(instr); + return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0; +} + +bool Assembler::IsBzc(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + // Checks if the instruction is BEQZC or BNEZC. + return (opcode == POP66 && GetRsField(instr) != 0) || + (opcode == POP76 && GetRsField(instr) != 0); +} + +bool Assembler::IsEmittedConstant(Instr instr) { + uint32_t label_constant = GetLabelConst(instr); + return label_constant == 0; // Emitted label const in reg-exp engine. 
+} + +bool Assembler::IsBeq(Instr instr) { return GetOpcodeField(instr) == BEQ; } + +bool Assembler::IsBne(Instr instr) { return GetOpcodeField(instr) == BNE; } + +bool Assembler::IsBeqzc(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + return opcode == POP66 && GetRsField(instr) != 0; +} + +bool Assembler::IsBnezc(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + return opcode == POP76 && GetRsField(instr) != 0; +} + +bool Assembler::IsBeqc(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + uint32_t rs = GetRsField(instr); + uint32_t rt = GetRtField(instr); + return opcode == POP10 && rs != 0 && rs < rt; // && rt != 0 +} + +bool Assembler::IsBnec(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + uint32_t rs = GetRsField(instr); + uint32_t rt = GetRtField(instr); + return opcode == POP30 && rs != 0 && rs < rt; // && rt != 0 +} + +bool Assembler::IsJicOrJialc(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + uint32_t rs = GetRsField(instr); + return (opcode == POP66 || opcode == POP76) && rs == 0; +} + +bool Assembler::IsJump(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + uint32_t rt_field = GetRtField(instr); + uint32_t rd_field = GetRdField(instr); + uint32_t function_field = GetFunctionField(instr); + // Checks if the instruction is a jump. + return opcode == J || opcode == JAL || + (opcode == SPECIAL && rt_field == 0 && + ((function_field == JALR) || + (rd_field == 0 && (function_field == JR)))); +} + +bool Assembler::IsJ(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + // Checks if the instruction is a jump. + return opcode == J; +} + +bool Assembler::IsJal(Instr instr) { return GetOpcodeField(instr) == JAL; } + +bool Assembler::IsJr(Instr instr) { + if (!IsMipsArchVariant(kMips32r6)) { + return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR; + } else { + return GetOpcodeField(instr) == SPECIAL && GetRdField(instr) == 0 && + GetFunctionField(instr) == JALR; + } +} + +bool Assembler::IsJalr(Instr instr) { + return GetOpcodeField(instr) == SPECIAL && GetRdField(instr) != 0 && + GetFunctionField(instr) == JALR; +} + +bool Assembler::IsLui(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + // Checks if the instruction is a load upper immediate. + return opcode == LUI; +} + +bool Assembler::IsOri(Instr instr) { + uint32_t opcode = GetOpcodeField(instr); + // Checks if the instruction is a load upper immediate. + return opcode == ORI; +} + +bool Assembler::IsAddu(Instr instr, Register rd, Register rs, Register rt) { + uint32_t opcode = GetOpcodeField(instr); + uint32_t rd_field = GetRd(instr); + uint32_t rs_field = GetRs(instr); + uint32_t rt_field = GetRt(instr); + uint32_t sa_field = GetSaField(instr); + uint32_t rd_reg = static_cast(rd.code()); + uint32_t rs_reg = static_cast(rs.code()); + uint32_t rt_reg = static_cast(rt.code()); + uint32_t function_field = GetFunction(instr); + return opcode == SPECIAL && sa_field == 0 && function_field == ADDU && + rd_reg == rd_field && rs_reg == rs_field && rt_reg == rt_field; +} + +bool Assembler::IsMov(Instr instr, Register rd, Register rs) { + uint32_t opcode = GetOpcodeField(instr); + uint32_t rd_field = GetRd(instr); + uint32_t rs_field = GetRs(instr); + uint32_t rt_field = GetRt(instr); + uint32_t rd_reg = static_cast(rd.code()); + uint32_t rs_reg = static_cast(rs.code()); + uint32_t function_field = GetFunctionField(instr); + // Checks if the instruction is a OR with zero_reg argument (aka MOV). 
+ bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg && + rs_field == rs_reg && rt_field == 0; + return res; +} + +bool Assembler::IsNop(Instr instr, unsigned int type) { + // See Assembler::nop(type). + DCHECK_LT(type, 32); + uint32_t opcode = GetOpcodeField(instr); + uint32_t function = GetFunctionField(instr); + uint32_t rt = GetRt(instr); + uint32_t rd = GetRd(instr); + uint32_t sa = GetSa(instr); + + // Traditional mips nop == sll(zero_reg, zero_reg, 0) + // When marking non-zero type, use sll(zero_reg, at, type) + // to avoid use of mips ssnop and ehb special encodings + // of the sll instruction. + + Register nop_rt_reg = (type == 0) ? zero_reg : at; + bool ret = (opcode == SPECIAL && function == SLL && + rd == static_cast(ToNumber(zero_reg)) && + rt == static_cast(ToNumber(nop_rt_reg)) && sa == type); + + return ret; +} + +int32_t Assembler::GetBranchOffset(Instr instr) { + DCHECK(IsBranch(instr)); + return (static_cast(instr & kImm16Mask)) << 2; +} + +bool Assembler::IsLw(Instr instr) { + return (static_cast(instr & kOpcodeMask) == LW); +} + +int16_t Assembler::GetLwOffset(Instr instr) { + DCHECK(IsLw(instr)); + return ((instr & kImm16Mask)); +} + +Instr Assembler::SetLwOffset(Instr instr, int16_t offset) { + DCHECK(IsLw(instr)); + + // We actually create a new lw instruction based on the original one. + Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) | + (offset & kImm16Mask); + + return temp_instr; +} + +bool Assembler::IsSw(Instr instr) { + return (static_cast(instr & kOpcodeMask) == SW); +} + +Instr Assembler::SetSwOffset(Instr instr, int16_t offset) { + DCHECK(IsSw(instr)); + return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); +} + +bool Assembler::IsAddImmediate(Instr instr) { + return ((instr & kOpcodeMask) == ADDIU); +} + +Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) { + DCHECK(IsAddImmediate(instr)); + return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); +} + +bool Assembler::IsAndImmediate(Instr instr) { + return GetOpcodeField(instr) == ANDI; +} + +static Assembler::OffsetSize OffsetSizeInBits(Instr instr) { + if (IsMipsArchVariant(kMips32r6)) { + if (Assembler::IsBc(instr)) { + return Assembler::OffsetSize::kOffset26; + } else if (Assembler::IsBzc(instr)) { + return Assembler::OffsetSize::kOffset21; + } + } + return Assembler::OffsetSize::kOffset16; +} + +static inline int32_t AddBranchOffset(int pos, Instr instr) { + int bits = OffsetSizeInBits(instr); + const int32_t mask = (1 << bits) - 1; + bits = 32 - bits; + + // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming + // the compiler uses arithmetic shifts for signed integers. + int32_t imm = ((instr & mask) << bits) >> (bits - 2); + + if (imm == kEndOfChain) { + // EndOfChain sentinel is returned directly, not relative to pc or pos. + return kEndOfChain; + } else { + return pos + Assembler::kBranchPCOffset + imm; + } +} + +uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) { + DCHECK(IsLui(instr_lui) && IsJicOrJialc(instr_jic)); + int16_t jic_offset = GetImmediate16(instr_jic); + int16_t lui_offset = GetImmediate16(instr_lui); + + if (jic_offset < 0) { + lui_offset += kImm16Mask; + } + uint32_t lui_offset_u = (static_cast(lui_offset)) << kLuiShift; + uint32_t jic_offset_u = static_cast(jic_offset) & kImm16Mask; + + return lui_offset_u | jic_offset_u; +} + +// Use just lui and jic instructions. Insert lower part of the target address in +// jic offset part. 
Since jic sign-extends offset and then add it with register, +// before that addition, difference between upper part of the target address and +// upper part of the sign-extended offset (0xFFFF or 0x0000), will be inserted +// in jic register with lui instruction. +void Assembler::UnpackTargetAddress(uint32_t address, int16_t* lui_offset, + int16_t* jic_offset) { + *lui_offset = (address & kHiMask) >> kLuiShift; + *jic_offset = address & kLoMask; + + if (*jic_offset < 0) { + *lui_offset -= kImm16Mask; + } +} + +void Assembler::UnpackTargetAddressUnsigned(uint32_t address, + uint32_t* lui_offset, + uint32_t* jic_offset) { + int16_t lui_offset16 = (address & kHiMask) >> kLuiShift; + int16_t jic_offset16 = address & kLoMask; + + if (jic_offset16 < 0) { + lui_offset16 -= kImm16Mask; + } + *lui_offset = static_cast(lui_offset16) & kImm16Mask; + *jic_offset = static_cast(jic_offset16) & kImm16Mask; +} + +void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui, + Address offset_lui, Instr instr_ori, + Address offset_ori) { + DCHECK(IsLui(instr_lui)); + DCHECK(IsOri(instr_ori)); + instr_at_put(static_cast(pc + offset_lui), + instr_lui | ((imm >> kLuiShift) & kImm16Mask)); + instr_at_put(static_cast(pc + offset_ori), + instr_ori | (imm & kImm16Mask)); +} + +void Assembler::PatchLuiOriImmediate(Address pc, int32_t imm, Instr instr_lui, + Address offset_lui, Instr instr_ori, + Address offset_ori) { + DCHECK(IsLui(instr_lui)); + DCHECK(IsOri(instr_ori)); + instr_at_put(pc + offset_lui, instr_lui | ((imm >> kLuiShift) & kImm16Mask)); + instr_at_put(pc + offset_ori, instr_ori | (imm & kImm16Mask)); +} + +int32_t Assembler::GetLuiOriImmediate(Instr instr_lui, Instr instr_ori) { + DCHECK(IsLui(instr_lui)); + DCHECK(IsOri(instr_ori)); + int32_t imm; + imm = (instr_lui & static_cast(kImm16Mask)) << kLuiShift; + imm |= (instr_ori & static_cast(kImm16Mask)); + return imm; +} + +int Assembler::target_at(int pos, bool is_internal) { + Instr instr = instr_at(pos); + if (is_internal) { + if (instr == 0) { + return kEndOfChain; + } else { + int32_t instr_address = reinterpret_cast(buffer_start_ + pos); + int delta = static_cast(instr_address - instr); + DCHECK(pos > delta); + return pos - delta; + } + } + if ((instr & ~kImm16Mask) == 0) { + // Emitted label constant, not part of a branch. + if (instr == 0) { + return kEndOfChain; + } else { + int32_t imm18 = ((instr & static_cast(kImm16Mask)) << 16) >> 14; + return (imm18 + pos); + } + } + // Check we have a branch or jump instruction. + DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra)); + if (IsBranch(instr)) { + return AddBranchOffset(pos, instr); + } else if (IsMov(instr, t8, ra)) { + int32_t imm32; + Instr instr_lui = instr_at(pos + 2 * kInstrSize); + Instr instr_ori = instr_at(pos + 3 * kInstrSize); + imm32 = GetLuiOriImmediate(instr_lui, instr_ori); + if (imm32 == kEndOfJumpChain) { + // EndOfChain sentinel is returned directly, not relative to pc or pos. + return kEndOfChain; + } + return pos + Assembler::kLongBranchPCOffset + imm32; + } else { + DCHECK(IsLui(instr)); + if (IsNal(instr_at(pos + kInstrSize))) { + int32_t imm32; + Instr instr_lui = instr_at(pos + 0 * kInstrSize); + Instr instr_ori = instr_at(pos + 2 * kInstrSize); + imm32 = GetLuiOriImmediate(instr_lui, instr_ori); + if (imm32 == kEndOfJumpChain) { + // EndOfChain sentinel is returned directly, not relative to pc or pos. 
+ return kEndOfChain; + } + return pos + Assembler::kLongBranchPCOffset + imm32; + } else { + Instr instr1 = instr_at(pos + 0 * kInstrSize); + Instr instr2 = instr_at(pos + 1 * kInstrSize); + DCHECK(IsOri(instr2) || IsJicOrJialc(instr2)); + int32_t imm; + if (IsJicOrJialc(instr2)) { + imm = CreateTargetAddress(instr1, instr2); + } else { + imm = GetLuiOriImmediate(instr1, instr2); + } + + if (imm == kEndOfJumpChain) { + // EndOfChain sentinel is returned directly, not relative to pc or pos. + return kEndOfChain; + } else { + uint32_t instr_address = reinterpret_cast(buffer_start_ + pos); + int32_t delta = instr_address - imm; + DCHECK(pos > delta); + return pos - delta; + } + } + } +} + +static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos, + Instr instr) { + int32_t bits = OffsetSizeInBits(instr); + int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset); + DCHECK_EQ(imm & 3, 0); + imm >>= 2; + + const int32_t mask = (1 << bits) - 1; + instr &= ~mask; + DCHECK(is_intn(imm, bits)); + + return instr | (imm & mask); +} + +void Assembler::target_at_put(int32_t pos, int32_t target_pos, + bool is_internal) { + Instr instr = instr_at(pos); + + if (is_internal) { + uint32_t imm = reinterpret_cast(buffer_start_) + target_pos; + instr_at_put(pos, imm); + return; + } + if ((instr & ~kImm16Mask) == 0) { + DCHECK(target_pos == kEndOfChain || target_pos >= 0); + // Emitted label constant, not part of a branch. + // Make label relative to Code pointer of generated Code object. + instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); + return; + } + + DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra)); + if (IsBranch(instr)) { + instr = SetBranchOffset(pos, target_pos, instr); + instr_at_put(pos, instr); + } else if (IsMov(instr, t8, ra)) { + Instr instr_lui = instr_at(pos + 2 * kInstrSize); + Instr instr_ori = instr_at(pos + 3 * kInstrSize); + DCHECK(IsLui(instr_lui)); + DCHECK(IsOri(instr_ori)); + + int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset); + + if (is_int16(imm_short)) { + // Optimize by converting to regular branch with 16-bit + // offset + Instr instr_b = BEQ; + instr_b = SetBranchOffset(pos, target_pos, instr_b); + + Instr instr_j = instr_at(pos + 5 * kInstrSize); + Instr instr_branch_delay; + + if (IsJump(instr_j)) { + // Case when branch delay slot is protected. + instr_branch_delay = nopInstr; + } else { + // Case when branch delay slot is used. + instr_branch_delay = instr_at(pos + 7 * kInstrSize); + } + instr_at_put(pos + 0 * kInstrSize, instr_b); + instr_at_put(pos + 1 * kInstrSize, instr_branch_delay); + } else { + int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset); + DCHECK_EQ(imm & 3, 0); + + instr_lui &= ~kImm16Mask; + instr_ori &= ~kImm16Mask; + + PatchLuiOriImmediate(pos, imm, instr_lui, 2 * kInstrSize, instr_ori, + 3 * kInstrSize); + } + } else { + DCHECK(IsLui(instr)); + if (IsNal(instr_at(pos + kInstrSize))) { + Instr instr_lui = instr_at(pos + 0 * kInstrSize); + Instr instr_ori = instr_at(pos + 2 * kInstrSize); + DCHECK(IsLui(instr_lui)); + DCHECK(IsOri(instr_ori)); + int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset); + DCHECK_EQ(imm & 3, 0); + if (is_int16(imm + Assembler::kLongBranchPCOffset - + Assembler::kBranchPCOffset)) { + // Optimize by converting to regular branch and link with 16-bit + // offset. + Instr instr_b = REGIMM | BGEZAL; // Branch and link. 
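// Illustrative note (not part of this patch): REGIMM | BGEZAL with an
// all-zero rs field is the unconditional "bal". The is_int16 check above is
// algebraically the question of whether target_pos - (pos + kBranchPCOffset)
// fits a regular 16-bit branch; e.g. with assumed values pos == 0x100 and
// target_pos == 0x180 (and kBranchPCOffset == kInstrSize) the distance is
// 0x7C, so the long lui/ori call sequence collapses into a single bal whose
// delay slot (the instr_a emitted just below) re-adjusts ra.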
+ instr_b = SetBranchOffset(pos, target_pos, instr_b); + // Correct ra register to point to one instruction after jalr from + // TurboAssembler::BranchAndLinkLong. + Instr instr_a = ADDIU | ra.code() << kRsShift | ra.code() << kRtShift | + kOptimizedBranchAndLinkLongReturnOffset; + + instr_at_put(pos, instr_b); + instr_at_put(pos + 1 * kInstrSize, instr_a); + } else { + instr_lui &= ~kImm16Mask; + instr_ori &= ~kImm16Mask; + PatchLuiOriImmediate(pos, imm, instr_lui, 0 * kInstrSize, instr_ori, + 2 * kInstrSize); + } + } else { + Instr instr1 = instr_at(pos + 0 * kInstrSize); + Instr instr2 = instr_at(pos + 1 * kInstrSize); + DCHECK(IsOri(instr2) || IsJicOrJialc(instr2)); + uint32_t imm = reinterpret_cast(buffer_start_) + target_pos; + DCHECK_EQ(imm & 3, 0); + DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2))); + instr1 &= ~kImm16Mask; + instr2 &= ~kImm16Mask; + + if (IsJicOrJialc(instr2)) { + uint32_t lui_offset_u, jic_offset_u; + UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u); + instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u); + instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u); + } else { + PatchLuiOriImmediate(pos, imm, instr1, 0 * kInstrSize, instr2, + 1 * kInstrSize); + } + } + } +} + +void Assembler::print(const Label* L) { + if (L->is_unused()) { + PrintF("unused label\n"); + } else if (L->is_bound()) { + PrintF("bound label to %d\n", L->pos()); + } else if (L->is_linked()) { + Label l; + l.link_to(L->pos()); + PrintF("unbound label"); + while (l.is_linked()) { + PrintF("@ %d ", l.pos()); + Instr instr = instr_at(l.pos()); + if ((instr & ~kImm16Mask) == 0) { + PrintF("value\n"); + } else { + PrintF("%d\n", instr); + } + next(&l, is_internal_reference(&l)); + } + } else { + PrintF("label in inconsistent state (pos = %d)\n", L->pos_); + } +} + +void Assembler::bind_to(Label* L, int pos) { + DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position. + int32_t trampoline_pos = kInvalidSlotPos; + bool is_internal = false; + if (L->is_linked() && !trampoline_emitted_) { + unbound_labels_count_--; + if (!is_internal_reference(L)) { + next_buffer_check_ += kTrampolineSlotsSize; + } + } + + while (L->is_linked()) { + int32_t fixup_pos = L->pos(); + int32_t dist = pos - fixup_pos; + is_internal = is_internal_reference(L); + next(L, is_internal); // Call next before overwriting link with target at + // fixup_pos. + Instr instr = instr_at(fixup_pos); + if (is_internal) { + target_at_put(fixup_pos, pos, is_internal); + } else { + if (IsBranch(instr)) { + int branch_offset = BranchOffset(instr); + if (dist > branch_offset) { + if (trampoline_pos == kInvalidSlotPos) { + trampoline_pos = get_trampoline_entry(fixup_pos); + CHECK_NE(trampoline_pos, kInvalidSlotPos); + } + CHECK((trampoline_pos - fixup_pos) <= branch_offset); + target_at_put(fixup_pos, trampoline_pos, false); + fixup_pos = trampoline_pos; + } + target_at_put(fixup_pos, pos, false); + } else { + target_at_put(fixup_pos, pos, false); + } + } + } + L->bind_to(pos); + + // Keep track of the last bound label so we don't eliminate any instructions + // before a bound label. + if (pos > last_bound_pos_) last_bound_pos_ = pos; +} + +void Assembler::bind(Label* L) { + DCHECK(!L->is_bound()); // Label can only be bound once. 
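// Usage sketch (illustrative, not part of this patch; assumes the
// MacroAssembler-level Branch() helper): a label is first linked by forward
// references and later bound, at which point bind_to() walks the link chain
// via target_at()/target_at_put() and patches every pending use:
//
//   Label done;
//   __ Branch(&done, eq, a0, Operand(zero_reg));  // links 'done'
//   ...
//   __ bind(&done);                               // patches all linked uses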
+ bind_to(L, pc_offset()); +} + +void Assembler::next(Label* L, bool is_internal) { + DCHECK(L->is_linked()); + int link = target_at(L->pos(), is_internal); + if (link == kEndOfChain) { + L->Unuse(); + } else { + DCHECK_GE(link, 0); + L->link_to(link); + } +} + +bool Assembler::is_near(Label* L) { + DCHECK(L->is_bound()); + return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize; +} + +bool Assembler::is_near(Label* L, OffsetSize bits) { + if (L == nullptr || !L->is_bound()) return true; + return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize; +} + +bool Assembler::is_near_branch(Label* L) { + DCHECK(L->is_bound()); + return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L); +} + +int Assembler::BranchOffset(Instr instr) { + // At pre-R6 and for other R6 branches the offset is 16 bits. + int bits = OffsetSize::kOffset16; + + if (IsMipsArchVariant(kMips32r6)) { + uint32_t opcode = GetOpcodeField(instr); + switch (opcode) { + // Checks BC or BALC. + case BC: + case BALC: + bits = OffsetSize::kOffset26; + break; + + // Checks BEQZC or BNEZC. + case POP66: + case POP76: + if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21; + break; + default: + break; + } + } + + return (1 << (bits + 2 - 1)) - 1; +} + +// We have to use a temporary register for things that can be relocated even +// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction +// space. There is no guarantee that the relocated location can be similarly +// encoded. +bool Assembler::MustUseReg(RelocInfo::Mode rmode) { + return !RelocInfo::IsNoInfo(rmode); +} + +void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt, + Register rd, uint16_t sa, + SecondaryField func) { + DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa)); + Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | + (rd.code() << kRdShift) | (sa << kSaShift) | func; + emit(instr); +} + +void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt, + uint16_t msb, uint16_t lsb, + SecondaryField func) { + DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb)); + Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | + (msb << kRdShift) | (lsb << kSaShift) | func; + emit(instr); +} + +void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, + FPURegister ft, FPURegister fs, FPURegister fd, + SecondaryField func) { + DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid()); + Instr instr = opcode | fmt | (ft.code() << kFtShift) | + (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; + emit(instr); +} + +void Assembler::GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft, + FPURegister fs, FPURegister fd, + SecondaryField func) { + DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid()); + Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) | + (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; + emit(instr); +} + +void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt, + FPURegister fs, FPURegister fd, + SecondaryField func) { + DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid()); + Instr instr = opcode | fmt | (rt.code() << kRtShift) | + (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; + emit(instr); +} + +void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt, + FPUControlRegister fs, SecondaryField func) { + DCHECK(fs.is_valid() && rt.is_valid()); + Instr instr 
= + opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; + emit(instr); +} + +// Instructions with immediate value. +// Registers are in the order of the instruction encoding, from left to right. +void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt, + int32_t j, + CompactBranchType is_compact_branch) { + DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); + Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | + (j & kImm16Mask); + emit(instr, is_compact_branch); +} + +void Assembler::GenInstrImmediate(Opcode opcode, Register base, Register rt, + int32_t offset9, int bit6, + SecondaryField func) { + DCHECK(base.is_valid() && rt.is_valid() && is_int9(offset9) && + is_uint1(bit6)); + Instr instr = opcode | (base.code() << kBaseShift) | (rt.code() << kRtShift) | + ((offset9 << kImm9Shift) & kImm9Mask) | bit6 << kBit6Shift | + func; + emit(instr); +} + +void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF, + int32_t j, + CompactBranchType is_compact_branch) { + DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j))); + Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); + emit(instr, is_compact_branch); +} + +void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft, + int32_t j, + CompactBranchType is_compact_branch) { + DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); + Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) | + (j & kImm16Mask); + emit(instr, is_compact_branch); +} + +void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21, + CompactBranchType is_compact_branch) { + DCHECK(rs.is_valid() && (is_int21(offset21))); + Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask); + emit(instr, is_compact_branch); +} + +void Assembler::GenInstrImmediate(Opcode opcode, Register rs, + uint32_t offset21) { + DCHECK(rs.is_valid() && (is_uint21(offset21))); + Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask); + emit(instr); +} + +void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26, + CompactBranchType is_compact_branch) { + DCHECK(is_int26(offset26)); + Instr instr = opcode | (offset26 & kImm26Mask); + emit(instr, is_compact_branch); +} + +void Assembler::GenInstrJump(Opcode opcode, uint32_t address) { + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK(is_uint26(address)); + Instr instr = opcode | address; + emit(instr); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +// MSA instructions +void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8, + MSARegister ws, MSARegister wd) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(ws.is_valid() && wd.is_valid() && is_uint8(imm8)); + Instr instr = MSA | operation | ((imm8 & kImm8Mask) << kWtShift) | + (ws.code() << kWsShift) | (wd.code() << kWdShift); + emit(instr); +} + +void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df, + int32_t imm5, MSARegister ws, MSARegister wd) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(ws.is_valid() && wd.is_valid()); + DCHECK((operation == MAXI_S) || (operation == MINI_S) || + (operation == CEQI) || (operation == CLTI_S) || + (operation == CLEI_S) + ? 
is_int5(imm5) + : is_uint5(imm5)); + Instr instr = MSA | operation | df | ((imm5 & kImm5Mask) << kWtShift) | + (ws.code() << kWsShift) | (wd.code() << kWdShift); + emit(instr); +} + +void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df, + uint32_t m, MSARegister ws, MSARegister wd) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(ws.is_valid() && wd.is_valid() && is_valid_msa_df_m(df, m)); + Instr instr = MSA | operation | df | (m << kWtShift) | + (ws.code() << kWsShift) | (wd.code() << kWdShift); + emit(instr); +} + +void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df, + int32_t imm10, MSARegister wd) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(wd.is_valid() && is_int10(imm10)); + Instr instr = MSA | operation | df | ((imm10 & kImm10Mask) << kWsShift) | + (wd.code() << kWdShift); + emit(instr); +} + +template +void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df, + RegType t, MSARegister ws, MSARegister wd) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(t.is_valid() && ws.is_valid() && wd.is_valid()); + Instr instr = MSA | operation | df | (t.code() << kWtShift) | + (ws.code() << kWsShift) | (wd.code() << kWdShift); + emit(instr); +} + +template +void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df, + uint32_t n, SrcType src, DstType dst) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(src.is_valid() && dst.is_valid() && is_valid_msa_df_n(df, n)); + Instr instr = MSA | operation | df | (n << kWtShift) | + (src.code() << kWsShift) | (dst.code() << kWdShift) | + MSA_ELM_MINOR; + emit(instr); +} + +void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df, + MSARegister wt, MSARegister ws, MSARegister wd) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid()); + DCHECK_LT(df, 2); + Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) | + (ws.code() << kWsShift) | (wd.code() << kWdShift); + emit(instr); +} + +void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt, + MSARegister ws, MSARegister wd) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid()); + Instr instr = MSA | operation | (wt.code() << kWtShift) | + (ws.code() << kWsShift) | (wd.code() << kWdShift) | + MSA_VEC_2R_2RF_MINOR; + emit(instr); +} + +void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10, + Register rs, MSARegister wd) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(rs.is_valid() && wd.is_valid() && is_int10(s10)); + Instr instr = MSA | operation | ((s10 & kImm10Mask) << kWtShift) | + (rs.code() << kWsShift) | (wd.code() << kWdShift); + emit(instr); +} + +void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df, + MSARegister ws, MSARegister wd) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(ws.is_valid() && wd.is_valid()); + Instr instr = MSA | MSA_2R_FORMAT | operation | df | (ws.code() << kWsShift) | + (wd.code() << kWdShift) | MSA_VEC_2R_2RF_MINOR; + emit(instr); +} + +void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df, + MSARegister ws, MSARegister wd) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(ws.is_valid() && wd.is_valid()); + Instr instr = MSA | MSA_2RF_FORMAT | operation | df | + (ws.code() << 
kWsShift) | (wd.code() << kWdShift) | + MSA_VEC_2R_2RF_MINOR; + emit(instr); +} + +void Assembler::GenInstrMsaBranch(SecondaryField operation, MSARegister wt, + int32_t offset16) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(wt.is_valid() && is_int16(offset16)); + BlockTrampolinePoolScope block_trampoline_pool(this); + Instr instr = + COP1 | operation | (wt.code() << kWtShift) | (offset16 & kImm16Mask); + emit(instr); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +// Returns the next free trampoline entry. +int32_t Assembler::get_trampoline_entry(int32_t pos) { + int32_t trampoline_entry = kInvalidSlotPos; + + if (!internal_trampoline_exception_) { + if (trampoline_.start() > pos) { + trampoline_entry = trampoline_.take_slot(); + } + + if (kInvalidSlotPos == trampoline_entry) { + internal_trampoline_exception_ = true; + } + } + return trampoline_entry; +} + +uint32_t Assembler::jump_address(Label* L) { + int32_t target_pos; + + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link. + L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + return kEndOfJumpChain; + } + } + + uint32_t imm = reinterpret_cast(buffer_start_) + target_pos; + DCHECK_EQ(imm & 3, 0); + + return imm; +} + +uint32_t Assembler::branch_long_offset(Label* L) { + int32_t target_pos; + + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link. + L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + return kEndOfJumpChain; + } + } + + DCHECK(is_int32(static_cast(target_pos) - + static_cast(pc_offset() + kLongBranchPCOffset))); + int32_t offset = target_pos - (pc_offset() + kLongBranchPCOffset); + DCHECK_EQ(offset & 3, 0); + + return offset; +} + +int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) { + int32_t target_pos; + int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0; + + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); + L->link_to(pc_offset() + pad); + } else { + L->link_to(pc_offset() + pad); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + return kEndOfChain; + } + } + + int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad); + DCHECK(is_intn(offset, bits + 2)); + DCHECK_EQ(offset & 3, 0); + + return offset; +} + +void Assembler::label_at_put(Label* L, int at_offset) { + int target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link. 
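// Illustrative note (not part of this patch): in the linked-but-unbound case
// the word written at at_offset is not an instruction but a link -- the
// pc-relative word delta to the previous use of L, kept in the low 16 bits
// (this is what the "(instr & ~kImm16Mask) == 0" test in target_at()
// recognizes). With assumed values, a previous link at 0x20 and
// at_offset == 0x40 give imm18 == -0x20, so the stored constant is
// (-0x20 >> 2) & kImm16Mask == 0xFFF8, which target_at() sign-extends back.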
+ int32_t imm18 = target_pos - at_offset; + DCHECK_EQ(imm18 & 3, 0); + int32_t imm16 = imm18 >> 2; + DCHECK(is_int16(imm16)); + instr_at_put(at_offset, (imm16 & kImm16Mask)); + } else { + target_pos = kEndOfChain; + instr_at_put(at_offset, 0); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + } + L->link_to(at_offset); + } +} + +//------- Branch and jump instructions -------- + +void Assembler::b(int16_t offset) { beq(zero_reg, zero_reg, offset); } + +void Assembler::bal(int16_t offset) { bgezal(zero_reg, offset); } + +void Assembler::bc(int32_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::balc(int32_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::beq(Register rs, Register rt, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(BEQ, rs, rt, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +void Assembler::bgez(Register rs, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(REGIMM, rs, BGEZ, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +void Assembler::bgezc(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rt != zero_reg); + GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bgeuc(Register rs, Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs != zero_reg); + DCHECK(rt != zero_reg); + DCHECK(rs.code() != rt.code()); + GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bgec(Register rs, Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs != zero_reg); + DCHECK(rt != zero_reg); + DCHECK(rs.code() != rt.code()); + GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bgezal(Register rs, int16_t offset) { + DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg); + DCHECK(rs != ra); + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(REGIMM, rs, BGEZAL, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +void Assembler::bgtz(Register rs, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(BGTZ, rs, zero_reg, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +void Assembler::bgtzc(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rt != zero_reg); + GenInstrImmediate(BGTZL, zero_reg, rt, offset, + CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::blez(Register rs, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(BLEZ, rs, zero_reg, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. 
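// Note (illustrative, not part of this patch): BlockTrampolinePoolFor(1)
// keeps the trampoline pool from being emitted between a delay-slot branch
// and its delay slot; otherwise the pool's leading unconditional jump could
// land in the delay slot and execute as part of this branch.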
+} + +void Assembler::blezc(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rt != zero_reg); + GenInstrImmediate(BLEZL, zero_reg, rt, offset, + CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bltzc(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rt != zero_reg); + GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bltuc(Register rs, Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs != zero_reg); + DCHECK(rt != zero_reg); + DCHECK(rs.code() != rt.code()); + GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bltc(Register rs, Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs != zero_reg); + DCHECK(rt != zero_reg); + DCHECK(rs.code() != rt.code()); + GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bltz(Register rs, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(REGIMM, rs, BLTZ, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +void Assembler::bltzal(Register rs, int16_t offset) { + DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg); + DCHECK(rs != ra); + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(REGIMM, rs, BLTZAL, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +void Assembler::bne(Register rs, Register rt, int16_t offset) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(BNE, rs, rt, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +void Assembler::bovc(Register rs, Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + if (rs.code() >= rt.code()) { + GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); + } else { + GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH); + } +} + +void Assembler::bnvc(Register rs, Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + if (rs.code() >= rt.code()) { + GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); + } else { + GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH); + } +} + +void Assembler::blezalc(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rt != zero_reg); + DCHECK(rt != ra); + GenInstrImmediate(BLEZ, zero_reg, rt, offset, + CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bgezalc(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rt != zero_reg); + DCHECK(rt != ra); + GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bgezall(Register rs, int16_t offset) { + DCHECK(!IsMipsArchVariant(kMips32r6)); + DCHECK(rs != zero_reg); + DCHECK(rs != ra); + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrImmediate(REGIMM, rs, BGEZALL, offset); + BlockTrampolinePoolFor(1); // For associated delay slot. 
+} + +void Assembler::bltzalc(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rt != zero_reg); + DCHECK(rt != ra); + GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bgtzalc(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rt != zero_reg); + DCHECK(rt != ra); + GenInstrImmediate(BGTZ, zero_reg, rt, offset, + CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::beqzalc(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rt != zero_reg); + DCHECK(rt != ra); + GenInstrImmediate(ADDI, zero_reg, rt, offset, + CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bnezalc(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rt != zero_reg); + DCHECK(rt != ra); + GenInstrImmediate(DADDI, zero_reg, rt, offset, + CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::beqc(Register rs, Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0); + if (rs.code() < rt.code()) { + GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); + } else { + GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH); + } +} + +void Assembler::beqzc(Register rs, int32_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs != zero_reg); + GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::bnec(Register rs, Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0); + if (rs.code() < rt.code()) { + GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); + } else { + GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH); + } +} + +void Assembler::bnezc(Register rs, int32_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs != zero_reg); + GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH); +} + +void Assembler::j(int32_t target) { +#if DEBUG + // Get pc of delay slot. + uint32_t ipc = reinterpret_cast(pc_ + 1 * kInstrSize); + bool in_range = ((ipc ^ static_cast(target)) >> + (kImm26Bits + kImmFieldShift)) == 0; + DCHECK(in_range && ((target & 3) == 0)); +#endif + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrJump(J, (target >> 2) & kImm26Mask); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +void Assembler::jr(Register rs) { + if (!IsMipsArchVariant(kMips32r6)) { + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); + BlockTrampolinePoolFor(1); // For associated delay slot. + } else { + jalr(rs, zero_reg); + } +} + +void Assembler::jal(int32_t target) { +#ifdef DEBUG + // Get pc of delay slot. + uint32_t ipc = reinterpret_cast(pc_ + 1 * kInstrSize); + bool in_range = ((ipc ^ static_cast(target)) >> + (kImm26Bits + kImmFieldShift)) == 0; + DCHECK(in_range && ((target & 3) == 0)); +#endif + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrJump(JAL, (target >> 2) & kImm26Mask); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +void Assembler::jalr(Register rs, Register rd) { + DCHECK(rs.code() != rd.code()); + BlockTrampolinePoolScope block_trampoline_pool(this); + GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR); + BlockTrampolinePoolFor(1); // For associated delay slot. 
+} + +void Assembler::jic(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrImmediate(POP66, zero_reg, rt, offset); +} + +void Assembler::jialc(Register rt, int16_t offset) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrImmediate(POP76, zero_reg, rt, offset); +} + +// -------Data-processing-instructions--------- + +// Arithmetic. + +void Assembler::addu(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU); +} + +void Assembler::addiu(Register rd, Register rs, int32_t j) { + GenInstrImmediate(ADDIU, rs, rd, j); +} + +void Assembler::subu(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU); +} + +void Assembler::mul(Register rd, Register rs, Register rt) { + if (!IsMipsArchVariant(kMips32r6)) { + GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); + } else { + GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH); + } +} + +void Assembler::mulu(Register rd, Register rs, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U); +} + +void Assembler::muh(Register rd, Register rs, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH); +} + +void Assembler::muhu(Register rd, Register rs, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U); +} + +void Assembler::mod(Register rd, Register rs, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD); +} + +void Assembler::modu(Register rd, Register rs, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U); +} + +void Assembler::mult(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT); +} + +void Assembler::multu(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU); +} + +void Assembler::div(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV); +} + +void Assembler::div(Register rd, Register rs, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD); +} + +void Assembler::divu(Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU); +} + +void Assembler::divu(Register rd, Register rs, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U); +} + +// Logical. + +void Assembler::and_(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND); +} + +void Assembler::andi(Register rt, Register rs, int32_t j) { + DCHECK(is_uint16(j)); + GenInstrImmediate(ANDI, rs, rt, j); +} + +void Assembler::or_(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR); +} + +void Assembler::ori(Register rt, Register rs, int32_t j) { + DCHECK(is_uint16(j)); + GenInstrImmediate(ORI, rs, rt, j); +} + +void Assembler::xor_(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR); +} + +void Assembler::xori(Register rt, Register rs, int32_t j) { + DCHECK(is_uint16(j)); + GenInstrImmediate(XORI, rs, rt, j); +} + +void Assembler::nor(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR); +} + +// Shifts. 
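// Illustrative sketch (not part of this patch): how an R-type shift word is
// assembled, mirroring the GenInstrRegister() path used by the emitters
// below. Field positions (rs@21, rt@16, rd@11, sa@6) are the standard MIPS32
// ones; the concrete register numbers in the example are assumptions.
static constexpr uint32_t IllustrativeEncodeRType(uint32_t op, uint32_t rs,
                                                  uint32_t rt, uint32_t rd,
                                                  uint32_t sa, uint32_t fn) {
  // opcode | rs | rt | rd | shift amount | function field.
  return op | (rs << 21) | (rt << 16) | (rd << 11) | (sa << 6) | fn;
}
// e.g. "sll $t0, $t1, 4" keeps rs == 0 and carries the amount in sa:
//   IllustrativeEncodeRType(0, 0, 9, 8, 4, 0) == 0x00094100
// whereas sllv/srlv/srav take the amount from rs and leave sa == 0, and
// rotr below reuses SRL with bit 21 (the rs field) set.
static_assert(IllustrativeEncodeRType(0, 0, 9, 8, 4, 0) == 0x00094100u,
              "example encoding of sll $t0, $t1, 4");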
+void Assembler::sll(Register rd, Register rt, uint16_t sa, + bool coming_from_nop) { + // Don't allow nop instructions in the form sll zero_reg, zero_reg to be + // generated using the sll instruction. They must be generated using + // nop(int/NopMarkerTypes). + DCHECK(coming_from_nop || !(rd == zero_reg && rt == zero_reg)); + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL); +} + +void Assembler::sllv(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV); +} + +void Assembler::srl(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL); +} + +void Assembler::srlv(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV); +} + +void Assembler::sra(Register rd, Register rt, uint16_t sa) { + GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA); +} + +void Assembler::srav(Register rd, Register rt, Register rs) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV); +} + +void Assembler::rotr(Register rd, Register rt, uint16_t sa) { + // Should be called via MacroAssembler::Ror. + DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa)); + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) | + (rd.code() << kRdShift) | (sa << kSaShift) | SRL; + emit(instr); +} + +void Assembler::rotrv(Register rd, Register rt, Register rs) { + // Should be called via MacroAssembler::Ror. + DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid()); + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) | + (rd.code() << kRdShift) | (1 << kSaShift) | SRLV; + emit(instr); +} + +void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) { + DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid()); + DCHECK_LE(sa, 3); + DCHECK(IsMipsArchVariant(kMips32r6)); + Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift | + rd.code() << kRdShift | sa << kSaShift | LSA; + emit(instr); +} + +// ------------Memory-instructions------------- + +void Assembler::AdjustBaseAndOffset(MemOperand* src, + OffsetAccessType access_type, + int second_access_add_to_offset) { + // This method is used to adjust the base register and offset pair + // for a load/store when the offset doesn't fit into int16_t. + // It is assumed that 'base + offset' is sufficiently aligned for memory + // operands that are machine word in size or smaller. For doubleword-sized + // operands it's assumed that 'base' is a multiple of 8, while 'offset' + // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments + // and spilled variables on the stack accessed relative to the stack + // pointer register). + // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8. + + bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0; + bool two_accesses = static_cast(access_type) || !doubleword_aligned; + DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7. + + // is_int16 must be passed a signed value, hence the static cast below. + if (is_int16(src->offset()) && + (!two_accesses || is_int16(static_cast( + src->offset() + second_access_add_to_offset)))) { + // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified + // value) fits into int16_t. 
+ return; + } + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(src->rm() != scratch); // Must not overwrite the register 'base' + // while loading 'offset'. + +#ifdef DEBUG + // Remember the "(mis)alignment" of 'offset', it will be checked at the end. + uint32_t misalignment = src->offset() & (kDoubleSize - 1); +#endif + + // Do not load the whole 32-bit 'offset' if it can be represented as + // a sum of two 16-bit signed offsets. This can save an instruction or two. + // To simplify matters, only do this for a symmetric range of offsets from + // about -64KB to about +64KB, allowing further addition of 4 when accessing + // 64-bit variables with two 32-bit accesses. + constexpr int32_t kMinOffsetForSimpleAdjustment = + 0x7FF8; // Max int16_t that's a multiple of 8. + constexpr int32_t kMaxOffsetForSimpleAdjustment = + 2 * kMinOffsetForSimpleAdjustment; + if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) { + addiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment); + src->offset_ -= kMinOffsetForSimpleAdjustment; + } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() && + src->offset() < 0) { + addiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment); + src->offset_ += kMinOffsetForSimpleAdjustment; + } else if (IsMipsArchVariant(kMips32r6)) { + // On r6 take advantage of the aui instruction, e.g.: + // aui at, base, offset_high + // lw reg_lo, offset_low(at) + // lw reg_hi, (offset_low+4)(at) + // or when offset_low+4 overflows int16_t: + // aui at, base, offset_high + // addiu at, at, 8 + // lw reg_lo, (offset_low-8)(at) + // lw reg_hi, (offset_low-4)(at) + int16_t offset_high = static_cast(src->offset() >> 16); + int16_t offset_low = static_cast(src->offset()); + offset_high += (offset_low < 0) + ? 1 + : 0; // Account for offset sign extension in load/store. + aui(scratch, src->rm(), static_cast(offset_high)); + if (two_accesses && !is_int16(static_cast( + offset_low + second_access_add_to_offset))) { + // Avoid overflow in the 16-bit offset of the load/store instruction when + // adding 4. + addiu(scratch, scratch, kDoubleSize); + offset_low -= kDoubleSize; + } + src->offset_ = offset_low; + } else { + // Do not load the whole 32-bit 'offset' if it can be represented as + // a sum of three 16-bit signed offsets. This can save an instruction. + // To simplify matters, only do this for a symmetric range of offsets from + // about -96KB to about +96KB, allowing further addition of 4 when accessing + // 64-bit variables with two 32-bit accesses. + constexpr int32_t kMinOffsetForMediumAdjustment = + 2 * kMinOffsetForSimpleAdjustment; + constexpr int32_t kMaxOffsetForMediumAdjustment = + 3 * kMinOffsetForSimpleAdjustment; + if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) { + addiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2); + addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2); + src->offset_ -= kMinOffsetForMediumAdjustment; + } else if (-kMaxOffsetForMediumAdjustment <= src->offset() && + src->offset() < 0) { + addiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2); + addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2); + src->offset_ += kMinOffsetForMediumAdjustment; + } else { + // Now that all shorter options have been exhausted, load the full 32-bit + // offset. + int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize); + lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask); + ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset. 
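// Illustrative walk-through (assumed numbers, not from the original source):
// for offset == 0x1234567C, loaded_offset == RoundDown(0x1234567C, 8)
// == 0x12345678, so the code emits
//   lui   scratch, 0x1234
//   ori   scratch, scratch, 0x5678
// followed by the addu below, and the access is finally issued with the
// small remainder 0x4(scratch), preserving the original doubleword
// (mis)alignment checked at the end of this function.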
+ addu(scratch, scratch, src->rm()); + src->offset_ -= loaded_offset; + } + } + src->rm_ = scratch; + + DCHECK(is_int16(src->offset())); + if (two_accesses) { + DCHECK(is_int16( + static_cast(src->offset() + second_access_add_to_offset))); + } + DCHECK(misalignment == (src->offset() & (kDoubleSize - 1))); +} + +void Assembler::lb(Register rd, const MemOperand& rs) { + MemOperand source = rs; + AdjustBaseAndOffset(&source); + GenInstrImmediate(LB, source.rm(), rd, source.offset()); +} + +void Assembler::lbu(Register rd, const MemOperand& rs) { + MemOperand source = rs; + AdjustBaseAndOffset(&source); + GenInstrImmediate(LBU, source.rm(), rd, source.offset()); +} + +void Assembler::lh(Register rd, const MemOperand& rs) { + MemOperand source = rs; + AdjustBaseAndOffset(&source); + GenInstrImmediate(LH, source.rm(), rd, source.offset()); +} + +void Assembler::lhu(Register rd, const MemOperand& rs) { + MemOperand source = rs; + AdjustBaseAndOffset(&source); + GenInstrImmediate(LHU, source.rm(), rd, source.offset()); +} + +void Assembler::lw(Register rd, const MemOperand& rs) { + MemOperand source = rs; + AdjustBaseAndOffset(&source); + GenInstrImmediate(LW, source.rm(), rd, source.offset()); +} + +void Assembler::lwl(Register rd, const MemOperand& rs) { + DCHECK(is_int16(rs.offset_)); + DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kMips32r2)); + GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_); +} + +void Assembler::lwr(Register rd, const MemOperand& rs) { + DCHECK(is_int16(rs.offset_)); + DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kMips32r2)); + GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_); +} + +void Assembler::sb(Register rd, const MemOperand& rs) { + MemOperand source = rs; + AdjustBaseAndOffset(&source); + GenInstrImmediate(SB, source.rm(), rd, source.offset()); +} + +void Assembler::sh(Register rd, const MemOperand& rs) { + MemOperand source = rs; + AdjustBaseAndOffset(&source); + GenInstrImmediate(SH, source.rm(), rd, source.offset()); +} + +void Assembler::sw(Register rd, const MemOperand& rs) { + MemOperand source = rs; + AdjustBaseAndOffset(&source); + GenInstrImmediate(SW, source.rm(), rd, source.offset()); +} + +void Assembler::swl(Register rd, const MemOperand& rs) { + DCHECK(is_int16(rs.offset_)); + DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kMips32r2)); + GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); +} + +void Assembler::swr(Register rd, const MemOperand& rs) { + DCHECK(is_int16(rs.offset_)); + DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kMips32r2)); + GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_); +} + +void Assembler::ll(Register rd, const MemOperand& rs) { + if (IsMipsArchVariant(kMips32r6)) { + DCHECK(is_int9(rs.offset_)); + GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LL_R6); + } else { + DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kMips32r2)); + DCHECK(is_int16(rs.offset_)); + GenInstrImmediate(LL, rs.rm(), rd, rs.offset_); + } +} + +void Assembler::sc(Register rd, const MemOperand& rs) { + if (IsMipsArchVariant(kMips32r6)) { + DCHECK(is_int9(rs.offset_)); + GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SC_R6); + } else { + DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kMips32r2)); + GenInstrImmediate(SC, rs.rm(), rd, rs.offset_); + } +} + +void Assembler::llx(Register 
rd, const MemOperand& rs) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(is_int9(rs.offset_)); + GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, LL_R6); +} + +void Assembler::scx(Register rd, const MemOperand& rs) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(is_int9(rs.offset_)); + GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, SC_R6); +} + +void Assembler::lui(Register rd, int32_t j) { + DCHECK(is_uint16(j) || is_int16(j)); + GenInstrImmediate(LUI, zero_reg, rd, j); +} + +void Assembler::aui(Register rt, Register rs, int32_t j) { + // This instruction uses same opcode as 'lui'. The difference in encoding is + // 'lui' has zero reg. for rs field. + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs != zero_reg); + DCHECK(is_uint16(j)); + GenInstrImmediate(LUI, rs, rt, j); +} + +// ---------PC-Relative instructions----------- + +void Assembler::addiupc(Register rs, int32_t imm19) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs.is_valid() && is_int19(imm19)); + uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask); + GenInstrImmediate(PCREL, rs, imm21); +} + +void Assembler::lwpc(Register rs, int32_t offset19) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs.is_valid() && is_int19(offset19)); + uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask); + GenInstrImmediate(PCREL, rs, imm21); +} + +void Assembler::auipc(Register rs, int16_t imm16) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs.is_valid()); + uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask); + GenInstrImmediate(PCREL, rs, imm21); +} + +void Assembler::aluipc(Register rs, int16_t imm16) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(rs.is_valid()); + uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask); + GenInstrImmediate(PCREL, rs, imm21); +} + +// -------------Misc-instructions-------------- + +// Break / Trap instructions. +void Assembler::break_(uint32_t code, bool break_as_stop) { + DCHECK_EQ(code & ~0xFFFFF, 0); + // We need to invalidate breaks that could be stops as well because the + // simulator expects a char pointer after the stop instruction. + // See constants-mips.h for explanation. 
+ DCHECK( + (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) || + (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode))); + Instr break_instr = SPECIAL | BREAK | (code << 6); + emit(break_instr); +} + +void Assembler::stop(uint32_t code) { + DCHECK_GT(code, kMaxWatchpointCode); + DCHECK_LE(code, kMaxStopCode); +#if V8_HOST_ARCH_MIPS + break_(0x54321); +#else // V8_HOST_ARCH_MIPS + break_(code, true); +#endif +} + +void Assembler::tge(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = + SPECIAL | TGE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; + emit(instr); +} + +void Assembler::tgeu(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = SPECIAL | TGEU | rs.code() << kRsShift | rt.code() << kRtShift | + code << 6; + emit(instr); +} + +void Assembler::tlt(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = + SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; + emit(instr); +} + +void Assembler::tltu(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = SPECIAL | TLTU | rs.code() << kRsShift | rt.code() << kRtShift | + code << 6; + emit(instr); +} + +void Assembler::teq(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = + SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; + emit(instr); +} + +void Assembler::tne(Register rs, Register rt, uint16_t code) { + DCHECK(is_uint10(code)); + Instr instr = + SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; + emit(instr); +} + +void Assembler::sync() { + Instr sync_instr = SPECIAL | SYNC; + emit(sync_instr); +} + +// Move from HI/LO register. + +void Assembler::mfhi(Register rd) { + GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI); +} + +void Assembler::mflo(Register rd) { + GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO); +} + +// Set on less than instructions. +void Assembler::slt(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT); +} + +void Assembler::sltu(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU); +} + +void Assembler::slti(Register rt, Register rs, int32_t j) { + GenInstrImmediate(SLTI, rs, rt, j); +} + +void Assembler::sltiu(Register rt, Register rs, int32_t j) { + GenInstrImmediate(SLTIU, rs, rt, j); +} + +// Conditional move. +void Assembler::movz(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ); +} + +void Assembler::movn(Register rd, Register rs, Register rt) { + GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN); +} + +void Assembler::movt(Register rd, Register rs, uint16_t cc) { + Register rt = Register::from_code((cc & 0x0007) << 2 | 1); + GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); +} + +void Assembler::movf(Register rd, Register rs, uint16_t cc) { + Register rt = Register::from_code((cc & 0x0007) << 2 | 0); + GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); +} + +void Assembler::seleqz(Register rd, Register rs, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S); +} + +// Bit twiddling. +void Assembler::clz(Register rd, Register rs) { + if (!IsMipsArchVariant(kMips32r6)) { + // Clz instr requires same GPR number in 'rd' and 'rt' fields. 
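// Hence rd is passed for both the rt and rd slots in the pre-r6 encoding
// below; the r6 CLZ_R6 form in the else branch no longer needs that.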
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ); + } else { + GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6); + } +} + +void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) { + // Should be called via MacroAssembler::Ins. + // Ins instr has 'rt' field as dest, and two uint5: msb, lsb. + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS); +} + +void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) { + // Should be called via MacroAssembler::Ext. + // Ext instr has 'rt' field as dest, and two uint5: msb, lsb. + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT); +} + +void Assembler::bitswap(Register rd, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL); +} + +void Assembler::pref(int32_t hint, const MemOperand& rs) { + DCHECK(!IsMipsArchVariant(kLoongson)); + DCHECK(is_uint5(hint) && is_uint16(rs.offset_)); + Instr instr = + PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | (rs.offset_); + emit(instr); +} + +void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK(is_uint3(bp)); + uint16_t sa = (ALIGN << kBp2Bits) | bp; + GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL); +} + +// Byte swap. +void Assembler::wsbh(Register rd, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL); +} + +void Assembler::seh(Register rd, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL); +} + +void Assembler::seb(Register rd, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL); +} + +// --------Coprocessor-instructions---------------- + +// Load, store, move. 
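// Usage sketch (illustrative only; register choices are assumptions): FPU
// loads and stores go through the same AdjustBaseAndOffset() re-basing as
// the integer loads above, and GPR<->FPR moves use mtc1/mthc1/mfc1/mfhc1,
// e.g.
//   lwc1(f4, MemOperand(a0, 0x8000));  // offset re-based via a scratch reg
//   mtc1(t0, f4);                      // low 32 bits of an FPU double
//   mthc1(t1, f4);                     // high 32 bits (Fp64/FR=1 mode)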
+void Assembler::lwc1(FPURegister fd, const MemOperand& src) { + MemOperand tmp = src; + AdjustBaseAndOffset(&tmp); + GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset()); +} + +void Assembler::swc1(FPURegister fd, const MemOperand& src) { + MemOperand tmp = src; + AdjustBaseAndOffset(&tmp); + GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset()); +} + +void Assembler::mtc1(Register rt, FPURegister fs) { + GenInstrRegister(COP1, MTC1, rt, fs, f0); +} + +void Assembler::mthc1(Register rt, FPURegister fs) { + GenInstrRegister(COP1, MTHC1, rt, fs, f0); +} + +void Assembler::mfc1(Register rt, FPURegister fs) { + GenInstrRegister(COP1, MFC1, rt, fs, f0); +} + +void Assembler::mfhc1(Register rt, FPURegister fs) { + GenInstrRegister(COP1, MFHC1, rt, fs, f0); +} + +void Assembler::ctc1(Register rt, FPUControlRegister fs) { + GenInstrRegister(COP1, CTC1, rt, fs); +} + +void Assembler::cfc1(Register rt, FPUControlRegister fs) { + GenInstrRegister(COP1, CFC1, rt, fs); +} + +void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) { + DCHECK(!IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C); +} + +void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) { + DCHECK(!IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C); +} + +void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs, + FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK((fmt == D) || (fmt == S)); + + GenInstrRegister(COP1, fmt, ft, fs, fd, SEL); +} + +void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) { + sel(S, fd, fs, ft); +} + +void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) { + sel(D, fd, fs, ft); +} + +void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs, + FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C); +} + +void Assembler::selnez(Register rd, Register rs, Register rt) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S); +} + +void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs, + FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C); +} + +void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) { + seleqz(D, fd, fs, ft); +} + +void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) { + seleqz(S, fd, fs, ft); +} + +void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) { + selnez(D, fd, fs, ft); +} + +void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) { + selnez(S, fd, fs, ft); +} + +void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) { + DCHECK(!IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C); +} + +void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) { + DCHECK(!IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C); +} + +void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) { + DCHECK(!IsMipsArchVariant(kMips32r6)); + FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1); + GenInstrRegister(COP1, S, ft, fs, fd, MOVF); +} + +void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) { + DCHECK(!IsMipsArchVariant(kMips32r6)); + FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1); + GenInstrRegister(COP1, D, ft, 
fs, fd, MOVF); +} + +void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) { + DCHECK(!IsMipsArchVariant(kMips32r6)); + FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0); + GenInstrRegister(COP1, S, ft, fs, fd, MOVF); +} + +void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) { + DCHECK(!IsMipsArchVariant(kMips32r6)); + FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0); + GenInstrRegister(COP1, D, ft, fs, fd, MOVF); +} + +// Arithmetic. + +void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, S, ft, fs, fd, ADD_S); +} + +void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, D, ft, fs, fd, ADD_D); +} + +void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, S, ft, fs, fd, SUB_S); +} + +void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, D, ft, fs, fd, SUB_D); +} + +void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, S, ft, fs, fd, MUL_S); +} + +void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, D, ft, fs, fd, MUL_D); +} + +void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r2)); + GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S); +} + +void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r2)); + GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D); +} + +void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r2)); + GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S); +} + +void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r2)); + GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D); +} + +void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S); +} + +void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D); +} + +void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S); +} + +void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D); +} + +void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, S, ft, fs, fd, DIV_S); +} + +void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) { + GenInstrRegister(COP1, D, ft, fs, fd, DIV_D); +} + +void Assembler::abs_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, ABS_S); +} + +void Assembler::abs_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, ABS_D); +} + +void Assembler::mov_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, MOV_D); +} + +void Assembler::mov_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, MOV_S); +} + +void Assembler::neg_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, NEG_S); +} + +void Assembler::neg_d(FPURegister fd, FPURegister fs) { + 
GenInstrRegister(COP1, D, f0, fs, fd, NEG_D); +} + +void Assembler::sqrt_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S); +} + +void Assembler::sqrt_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D); +} + +void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S); +} + +void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D); +} + +void Assembler::recip_d(FPURegister fd, FPURegister fs) { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D); +} + +void Assembler::recip_s(FPURegister fd, FPURegister fs) { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S); +} + +// Conversions. + +void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S); +} + +void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D); +} + +void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S); +} + +void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D); +} + +void Assembler::round_w_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S); +} + +void Assembler::round_w_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D); +} + +void Assembler::floor_w_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S); +} + +void Assembler::floor_w_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D); +} + +void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S); +} + +void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D); +} + +void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); } + +void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, f0, fs, fd, RINT); +} + +void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); } + +void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); + GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S); +} + +void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); + GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D); +} + +void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); + GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S); +} + +void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); + GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D); +} + +void Assembler::round_l_s(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); 
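+  // Note (descriptive comment, not in the original patch): the <fmt>-to-L
+  // conversions (cvt/trunc/round/floor/ceil to the L format) produce a
+  // 64-bit integer result in the destination FPU register, which is why the
+  // DCHECK above restricts them to MIPS32r2/r6 with a 64-bit FPU
+  // (IsFp64Mode()).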
+ GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S); +} + +void Assembler::round_l_d(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); + GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D); +} + +void Assembler::floor_l_s(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); + GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S); +} + +void Assembler::floor_l_d(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); + GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D); +} + +void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); + GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S); +} + +void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); + GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D); +} + +void Assembler::class_s(FPURegister fd, FPURegister fs) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S); +} + +void Assembler::class_d(FPURegister fd, FPURegister fs) { + DCHECK(IsMipsArchVariant(kMips32r6)); + GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D); +} + +void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs, + FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MIN); +} + +void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs, + FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MINA); +} + +void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs, + FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MAX); +} + +void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs, + FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA); +} + +void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) { + min(S, fd, fs, ft); +} + +void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) { + min(D, fd, fs, ft); +} + +void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) { + max(S, fd, fs, ft); +} + +void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) { + max(D, fd, fs, ft); +} + +void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) { + mina(S, fd, fs, ft); +} + +void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) { + mina(D, fd, fs, ft); +} + +void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) { + maxa(S, fd, fs, ft); +} + +void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) { + maxa(D, fd, fs, ft); +} + +void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); +} + +void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); + GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L); +} + +void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D); +} + +void 
Assembler::cvt_d_w(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W); +} + +void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) { + DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()); + GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L); +} + +void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { + GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); +} + +// Conditions for >= MIPSr6. +void Assembler::cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd, + FPURegister fs, FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + DCHECK_EQ(fmt & ~(31 << kRsShift), 0); + Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift | + fd.code() << kFdShift | (0 << 5) | cond; + emit(instr); +} + +void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, + FPURegister ft) { + cmp(cond, W, fd, fs, ft); +} + +void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, + FPURegister ft) { + cmp(cond, L, fd, fs, ft); +} + +void Assembler::bc1eqz(int16_t offset, FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + BlockTrampolinePoolScope block_trampoline_pool(this); + Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask); + emit(instr); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +void Assembler::bc1nez(int16_t offset, FPURegister ft) { + DCHECK(IsMipsArchVariant(kMips32r6)); + BlockTrampolinePoolScope block_trampoline_pool(this); + Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask); + emit(instr); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +// Conditions for < MIPSr6. +void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs, + FPURegister ft, uint16_t cc) { + DCHECK(is_uint3(cc)); + DCHECK(fmt == S || fmt == D); + DCHECK_EQ(fmt & ~(31 << kRsShift), 0); + Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift | cc << 8 | + 3 << 4 | cond; + emit(instr); +} + +void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft, + uint16_t cc) { + c(cond, S, fs, ft, cc); +} + +void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft, + uint16_t cc) { + c(cond, D, fs, ft, cc); +} + +void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) { + DCHECK_EQ(src2, 0.0); + mtc1(zero_reg, f14); + cvt_d_w(f14, f14); + c(cond, D, src1, f14, 0); +} + +void Assembler::bc1f(int16_t offset, uint16_t cc) { + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK(is_uint3(cc)); + Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); + emit(instr); + BlockTrampolinePoolFor(1); // For associated delay slot. +} + +void Assembler::bc1t(int16_t offset, uint16_t cc) { + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK(is_uint3(cc)); + Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); + emit(instr); + BlockTrampolinePoolFor(1); // For associated delay slot. 
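+  // Blocking the trampoline pool for one more instruction keeps the branch
+  // and the delay-slot instruction that follows it adjacent; a trampoline
+  // pool emitted between them would separate the pair.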
+} + +// ---------- MSA instructions ------------ +#define MSA_BRANCH_LIST(V) \ + V(bz_v, BZ_V) \ + V(bz_b, BZ_B) \ + V(bz_h, BZ_H) \ + V(bz_w, BZ_W) \ + V(bz_d, BZ_D) \ + V(bnz_v, BNZ_V) \ + V(bnz_b, BNZ_B) \ + V(bnz_h, BNZ_H) \ + V(bnz_w, BNZ_W) \ + V(bnz_d, BNZ_D) + +#define MSA_BRANCH(name, opcode) \ + void Assembler::name(MSARegister wt, int16_t offset) { \ + GenInstrMsaBranch(opcode, wt, offset); \ + } + +MSA_BRANCH_LIST(MSA_BRANCH) +#undef MSA_BRANCH +#undef MSA_BRANCH_LIST + +#define MSA_LD_ST_LIST(V) \ + V(ld_b, LD_B) \ + V(ld_h, LD_H) \ + V(ld_w, LD_W) \ + V(ld_d, LD_D) \ + V(st_b, ST_B) \ + V(st_h, ST_H) \ + V(st_w, ST_W) \ + V(st_d, ST_D) + +#define MSA_LD_ST(name, opcode) \ + void Assembler::name(MSARegister wd, const MemOperand& rs) { \ + MemOperand source = rs; \ + AdjustBaseAndOffset(&source); \ + if (is_int10(source.offset())) { \ + GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \ + } else { \ + UseScratchRegisterScope temps(this); \ + Register scratch = temps.Acquire(); \ + DCHECK(rs.rm() != scratch); \ + addiu(scratch, source.rm(), source.offset()); \ + GenInstrMsaMI10(opcode, 0, scratch, wd); \ + } \ + } + +MSA_LD_ST_LIST(MSA_LD_ST) +#undef MSA_LD_ST +#undef MSA_LD_ST_LIST + +#define MSA_I10_LIST(V) \ + V(ldi_b, I5_DF_b) \ + V(ldi_h, I5_DF_h) \ + V(ldi_w, I5_DF_w) \ + V(ldi_d, I5_DF_d) + +#define MSA_I10(name, format) \ + void Assembler::name(MSARegister wd, int32_t imm10) { \ + GenInstrMsaI10(LDI, format, imm10, wd); \ + } +MSA_I10_LIST(MSA_I10) +#undef MSA_I10 +#undef MSA_I10_LIST + +#define MSA_I5_LIST(V) \ + V(addvi, ADDVI) \ + V(subvi, SUBVI) \ + V(maxi_s, MAXI_S) \ + V(maxi_u, MAXI_U) \ + V(mini_s, MINI_S) \ + V(mini_u, MINI_U) \ + V(ceqi, CEQI) \ + V(clti_s, CLTI_S) \ + V(clti_u, CLTI_U) \ + V(clei_s, CLEI_S) \ + V(clei_u, CLEI_U) + +#define MSA_I5_FORMAT(name, opcode, format) \ + void Assembler::name##_##format(MSARegister wd, MSARegister ws, \ + uint32_t imm5) { \ + GenInstrMsaI5(opcode, I5_DF_##format, imm5, ws, wd); \ + } + +#define MSA_I5(name, opcode) \ + MSA_I5_FORMAT(name, opcode, b) \ + MSA_I5_FORMAT(name, opcode, h) \ + MSA_I5_FORMAT(name, opcode, w) \ + MSA_I5_FORMAT(name, opcode, d) + +MSA_I5_LIST(MSA_I5) +#undef MSA_I5 +#undef MSA_I5_FORMAT +#undef MSA_I5_LIST + +#define MSA_I8_LIST(V) \ + V(andi_b, ANDI_B) \ + V(ori_b, ORI_B) \ + V(nori_b, NORI_B) \ + V(xori_b, XORI_B) \ + V(bmnzi_b, BMNZI_B) \ + V(bmzi_b, BMZI_B) \ + V(bseli_b, BSELI_B) \ + V(shf_b, SHF_B) \ + V(shf_h, SHF_H) \ + V(shf_w, SHF_W) + +#define MSA_I8(name, opcode) \ + void Assembler::name(MSARegister wd, MSARegister ws, uint32_t imm8) { \ + GenInstrMsaI8(opcode, imm8, ws, wd); \ + } + +MSA_I8_LIST(MSA_I8) +#undef MSA_I8 +#undef MSA_I8_LIST + +#define MSA_VEC_LIST(V) \ + V(and_v, AND_V) \ + V(or_v, OR_V) \ + V(nor_v, NOR_V) \ + V(xor_v, XOR_V) \ + V(bmnz_v, BMNZ_V) \ + V(bmz_v, BMZ_V) \ + V(bsel_v, BSEL_V) + +#define MSA_VEC(name, opcode) \ + void Assembler::name(MSARegister wd, MSARegister ws, MSARegister wt) { \ + GenInstrMsaVec(opcode, wt, ws, wd); \ + } + +MSA_VEC_LIST(MSA_VEC) +#undef MSA_VEC +#undef MSA_VEC_LIST + +#define MSA_2R_LIST(V) \ + V(pcnt, PCNT) \ + V(nloc, NLOC) \ + V(nlzc, NLZC) + +#define MSA_2R_FORMAT(name, opcode, format) \ + void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \ + GenInstrMsa2R(opcode, MSA_2R_DF_##format, ws, wd); \ + } + +#define MSA_2R(name, opcode) \ + MSA_2R_FORMAT(name, opcode, b) \ + MSA_2R_FORMAT(name, opcode, h) \ + MSA_2R_FORMAT(name, opcode, w) \ + MSA_2R_FORMAT(name, opcode, d) + +MSA_2R_LIST(MSA_2R) +#undef 
MSA_2R +#undef MSA_2R_FORMAT +#undef MSA_2R_LIST + +#define MSA_FILL(format) \ + void Assembler::fill_##format(MSARegister wd, Register rs) { \ + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); \ + DCHECK(rs.is_valid() && wd.is_valid()); \ + Instr instr = MSA | MSA_2R_FORMAT | FILL | MSA_2R_DF_##format | \ + (rs.code() << kWsShift) | (wd.code() << kWdShift) | \ + MSA_VEC_2R_2RF_MINOR; \ + emit(instr); \ + } + +MSA_FILL(b) +MSA_FILL(h) +MSA_FILL(w) +#undef MSA_FILL + +#define MSA_2RF_LIST(V) \ + V(fclass, FCLASS) \ + V(ftrunc_s, FTRUNC_S) \ + V(ftrunc_u, FTRUNC_U) \ + V(fsqrt, FSQRT) \ + V(frsqrt, FRSQRT) \ + V(frcp, FRCP) \ + V(frint, FRINT) \ + V(flog2, FLOG2) \ + V(fexupl, FEXUPL) \ + V(fexupr, FEXUPR) \ + V(ffql, FFQL) \ + V(ffqr, FFQR) \ + V(ftint_s, FTINT_S) \ + V(ftint_u, FTINT_U) \ + V(ffint_s, FFINT_S) \ + V(ffint_u, FFINT_U) + +#define MSA_2RF_FORMAT(name, opcode, format) \ + void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \ + GenInstrMsa2RF(opcode, MSA_2RF_DF_##format, ws, wd); \ + } + +#define MSA_2RF(name, opcode) \ + MSA_2RF_FORMAT(name, opcode, w) \ + MSA_2RF_FORMAT(name, opcode, d) + +MSA_2RF_LIST(MSA_2RF) +#undef MSA_2RF +#undef MSA_2RF_FORMAT +#undef MSA_2RF_LIST + +#define MSA_3R_LIST(V) \ + V(sll, SLL_MSA) \ + V(sra, SRA_MSA) \ + V(srl, SRL_MSA) \ + V(bclr, BCLR) \ + V(bset, BSET) \ + V(bneg, BNEG) \ + V(binsl, BINSL) \ + V(binsr, BINSR) \ + V(addv, ADDV) \ + V(subv, SUBV) \ + V(max_s, MAX_S) \ + V(max_u, MAX_U) \ + V(min_s, MIN_S) \ + V(min_u, MIN_U) \ + V(max_a, MAX_A) \ + V(min_a, MIN_A) \ + V(ceq, CEQ) \ + V(clt_s, CLT_S) \ + V(clt_u, CLT_U) \ + V(cle_s, CLE_S) \ + V(cle_u, CLE_U) \ + V(add_a, ADD_A) \ + V(adds_a, ADDS_A) \ + V(adds_s, ADDS_S) \ + V(adds_u, ADDS_U) \ + V(ave_s, AVE_S) \ + V(ave_u, AVE_U) \ + V(aver_s, AVER_S) \ + V(aver_u, AVER_U) \ + V(subs_s, SUBS_S) \ + V(subs_u, SUBS_U) \ + V(subsus_u, SUBSUS_U) \ + V(subsuu_s, SUBSUU_S) \ + V(asub_s, ASUB_S) \ + V(asub_u, ASUB_U) \ + V(mulv, MULV) \ + V(maddv, MADDV) \ + V(msubv, MSUBV) \ + V(div_s, DIV_S_MSA) \ + V(div_u, DIV_U) \ + V(mod_s, MOD_S) \ + V(mod_u, MOD_U) \ + V(dotp_s, DOTP_S) \ + V(dotp_u, DOTP_U) \ + V(dpadd_s, DPADD_S) \ + V(dpadd_u, DPADD_U) \ + V(dpsub_s, DPSUB_S) \ + V(dpsub_u, DPSUB_U) \ + V(pckev, PCKEV) \ + V(pckod, PCKOD) \ + V(ilvl, ILVL) \ + V(ilvr, ILVR) \ + V(ilvev, ILVEV) \ + V(ilvod, ILVOD) \ + V(vshf, VSHF) \ + V(srar, SRAR) \ + V(srlr, SRLR) \ + V(hadd_s, HADD_S) \ + V(hadd_u, HADD_U) \ + V(hsub_s, HSUB_S) \ + V(hsub_u, HSUB_U) + +#define MSA_3R_FORMAT(name, opcode, format) \ + void Assembler::name##_##format(MSARegister wd, MSARegister ws, \ + MSARegister wt) { \ + GenInstrMsa3R(opcode, MSA_3R_DF_##format, wt, ws, wd); \ + } + +#define MSA_3R_FORMAT_SLD_SPLAT(name, opcode, format) \ + void Assembler::name##_##format(MSARegister wd, MSARegister ws, \ + Register rt) { \ + GenInstrMsa3R(opcode, MSA_3R_DF_##format, rt, ws, wd); \ + } + +#define MSA_3R(name, opcode) \ + MSA_3R_FORMAT(name, opcode, b) \ + MSA_3R_FORMAT(name, opcode, h) \ + MSA_3R_FORMAT(name, opcode, w) \ + MSA_3R_FORMAT(name, opcode, d) + +#define MSA_3R_SLD_SPLAT(name, opcode) \ + MSA_3R_FORMAT_SLD_SPLAT(name, opcode, b) \ + MSA_3R_FORMAT_SLD_SPLAT(name, opcode, h) \ + MSA_3R_FORMAT_SLD_SPLAT(name, opcode, w) \ + MSA_3R_FORMAT_SLD_SPLAT(name, opcode, d) + +MSA_3R_LIST(MSA_3R) +MSA_3R_SLD_SPLAT(sld, SLD) +MSA_3R_SLD_SPLAT(splat, SPLAT) + +#undef MSA_3R +#undef MSA_3R_FORMAT +#undef MSA_3R_FORMAT_SLD_SPLAT +#undef MSA_3R_SLD_SPLAT +#undef MSA_3R_LIST + +#define MSA_3RF_LIST1(V) \ + 
V(fcaf, FCAF) \ + V(fcun, FCUN) \ + V(fceq, FCEQ) \ + V(fcueq, FCUEQ) \ + V(fclt, FCLT) \ + V(fcult, FCULT) \ + V(fcle, FCLE) \ + V(fcule, FCULE) \ + V(fsaf, FSAF) \ + V(fsun, FSUN) \ + V(fseq, FSEQ) \ + V(fsueq, FSUEQ) \ + V(fslt, FSLT) \ + V(fsult, FSULT) \ + V(fsle, FSLE) \ + V(fsule, FSULE) \ + V(fadd, FADD) \ + V(fsub, FSUB) \ + V(fmul, FMUL) \ + V(fdiv, FDIV) \ + V(fmadd, FMADD) \ + V(fmsub, FMSUB) \ + V(fexp2, FEXP2) \ + V(fmin, FMIN) \ + V(fmin_a, FMIN_A) \ + V(fmax, FMAX) \ + V(fmax_a, FMAX_A) \ + V(fcor, FCOR) \ + V(fcune, FCUNE) \ + V(fcne, FCNE) \ + V(fsor, FSOR) \ + V(fsune, FSUNE) \ + V(fsne, FSNE) + +#define MSA_3RF_LIST2(V) \ + V(fexdo, FEXDO) \ + V(ftq, FTQ) \ + V(mul_q, MUL_Q) \ + V(madd_q, MADD_Q) \ + V(msub_q, MSUB_Q) \ + V(mulr_q, MULR_Q) \ + V(maddr_q, MADDR_Q) \ + V(msubr_q, MSUBR_Q) + +#define MSA_3RF_FORMAT(name, opcode, df, df_c) \ + void Assembler::name##_##df(MSARegister wd, MSARegister ws, \ + MSARegister wt) { \ + GenInstrMsa3RF(opcode, df_c, wt, ws, wd); \ + } + +#define MSA_3RF_1(name, opcode) \ + MSA_3RF_FORMAT(name, opcode, w, 0) \ + MSA_3RF_FORMAT(name, opcode, d, 1) + +#define MSA_3RF_2(name, opcode) \ + MSA_3RF_FORMAT(name, opcode, h, 0) \ + MSA_3RF_FORMAT(name, opcode, w, 1) + +MSA_3RF_LIST1(MSA_3RF_1) +MSA_3RF_LIST2(MSA_3RF_2) +#undef MSA_3RF_1 +#undef MSA_3RF_2 +#undef MSA_3RF_FORMAT +#undef MSA_3RF_LIST1 +#undef MSA_3RF_LIST2 + +void Assembler::sldi_b(MSARegister wd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(SLDI, ELM_DF_B, n, ws, wd); +} + +void Assembler::sldi_h(MSARegister wd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(SLDI, ELM_DF_H, n, ws, wd); +} + +void Assembler::sldi_w(MSARegister wd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(SLDI, ELM_DF_W, n, ws, wd); +} + +void Assembler::sldi_d(MSARegister wd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(SLDI, ELM_DF_D, n, ws, wd); +} + +void Assembler::splati_b(MSARegister wd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(SPLATI, ELM_DF_B, n, ws, wd); +} + +void Assembler::splati_h(MSARegister wd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(SPLATI, ELM_DF_H, n, ws, wd); +} + +void Assembler::splati_w(MSARegister wd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(SPLATI, ELM_DF_W, n, ws, wd); +} + +void Assembler::splati_d(MSARegister wd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(SPLATI, ELM_DF_D, n, ws, wd); +} + +void Assembler::copy_s_b(Register rd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(COPY_S, ELM_DF_B, n, ws, rd); +} + +void Assembler::copy_s_h(Register rd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(COPY_S, ELM_DF_H, n, ws, rd); +} + +void Assembler::copy_s_w(Register rd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(COPY_S, ELM_DF_W, n, ws, rd); +} + +void Assembler::copy_u_b(Register rd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(COPY_U, ELM_DF_B, n, ws, rd); +} + +void Assembler::copy_u_h(Register rd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(COPY_U, ELM_DF_H, n, ws, rd); +} + +void Assembler::copy_u_w(Register rd, MSARegister ws, uint32_t n) { + GenInstrMsaElm(COPY_U, ELM_DF_W, n, ws, rd); +} + +void Assembler::insert_b(MSARegister wd, uint32_t n, Register rs) { + GenInstrMsaElm(INSERT, ELM_DF_B, n, rs, wd); +} + +void Assembler::insert_h(MSARegister wd, uint32_t n, Register rs) { + GenInstrMsaElm(INSERT, ELM_DF_H, n, rs, wd); +} + +void Assembler::insert_w(MSARegister wd, uint32_t n, Register rs) { + GenInstrMsaElm(INSERT, ELM_DF_W, n, rs, wd); +} + +void Assembler::insve_b(MSARegister wd, uint32_t n, MSARegister ws) { + GenInstrMsaElm(INSVE, 
ELM_DF_B, n, ws, wd); +} + +void Assembler::insve_h(MSARegister wd, uint32_t n, MSARegister ws) { + GenInstrMsaElm(INSVE, ELM_DF_H, n, ws, wd); +} + +void Assembler::insve_w(MSARegister wd, uint32_t n, MSARegister ws) { + GenInstrMsaElm(INSVE, ELM_DF_W, n, ws, wd); +} + +void Assembler::insve_d(MSARegister wd, uint32_t n, MSARegister ws) { + GenInstrMsaElm(INSVE, ELM_DF_D, n, ws, wd); +} + +void Assembler::move_v(MSARegister wd, MSARegister ws) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(ws.is_valid() && wd.is_valid()); + Instr instr = MSA | MOVE_V | (ws.code() << kWsShift) | + (wd.code() << kWdShift) | MSA_ELM_MINOR; + emit(instr); +} + +void Assembler::ctcmsa(MSAControlRegister cd, Register rs) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(cd.is_valid() && rs.is_valid()); + Instr instr = MSA | CTCMSA | (rs.code() << kWsShift) | + (cd.code() << kWdShift) | MSA_ELM_MINOR; + emit(instr); +} + +void Assembler::cfcmsa(Register rd, MSAControlRegister cs) { + DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); + DCHECK(rd.is_valid() && cs.is_valid()); + Instr instr = MSA | CFCMSA | (cs.code() << kWsShift) | + (rd.code() << kWdShift) | MSA_ELM_MINOR; + emit(instr); +} + +#define MSA_BIT_LIST(V) \ + V(slli, SLLI) \ + V(srai, SRAI) \ + V(srli, SRLI) \ + V(bclri, BCLRI) \ + V(bseti, BSETI) \ + V(bnegi, BNEGI) \ + V(binsli, BINSLI) \ + V(binsri, BINSRI) \ + V(sat_s, SAT_S) \ + V(sat_u, SAT_U) \ + V(srari, SRARI) \ + V(srlri, SRLRI) + +#define MSA_BIT_FORMAT(name, opcode, format) \ + void Assembler::name##_##format(MSARegister wd, MSARegister ws, \ + uint32_t m) { \ + GenInstrMsaBit(opcode, BIT_DF_##format, m, ws, wd); \ + } + +#define MSA_BIT(name, opcode) \ + MSA_BIT_FORMAT(name, opcode, b) \ + MSA_BIT_FORMAT(name, opcode, h) \ + MSA_BIT_FORMAT(name, opcode, w) \ + MSA_BIT_FORMAT(name, opcode, d) + +MSA_BIT_LIST(MSA_BIT) +#undef MSA_BIT +#undef MSA_BIT_FORMAT +#undef MSA_BIT_LIST + +int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc, + intptr_t pc_delta) { + Instr instr = instr_at(pc); + + if (RelocInfo::IsInternalReference(rmode)) { + int32_t* p = reinterpret_cast(pc); + if (*p == 0) { + return 0; // Number of instructions patched. + } + *p += pc_delta; + return 1; // Number of instructions patched. + } else { + DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode)); + if (IsLui(instr)) { + Instr instr1 = instr_at(pc + 0 * kInstrSize); + Instr instr2 = instr_at(pc + 1 * kInstrSize); + DCHECK(IsOri(instr2) || IsJicOrJialc(instr2)); + int32_t imm; + if (IsJicOrJialc(instr2)) { + imm = CreateTargetAddress(instr1, instr2); + } else { + imm = GetLuiOriImmediate(instr1, instr2); + } + + if (imm == kEndOfJumpChain) { + return 0; // Number of instructions patched. + } + imm += pc_delta; + DCHECK_EQ(imm & 3, 0); + instr1 &= ~kImm16Mask; + instr2 &= ~kImm16Mask; + + if (IsJicOrJialc(instr2)) { + uint32_t lui_offset_u, jic_offset_u; + Assembler::UnpackTargetAddressUnsigned(imm, + &lui_offset_u, &jic_offset_u); + instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u); + instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u); + } else { + PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2, + 1 * kInstrSize); + } + return 2; // Number of instructions patched. 
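+      // An encoded internal reference is materialized as either a lui/ori
+      // pair or, on r6, a lui/jic pair; both halves of the address are
+      // rewritten above, hence two instructions are reported as patched.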
+ } else { + UNREACHABLE(); + } + } +} + +void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc, + intptr_t pc_delta) { + Instr instr = instr_at(pc); + + DCHECK(RelocInfo::IsRelativeCodeTarget(rmode)); + if (IsLui(instr)) { + Instr instr1 = instr_at(pc + 0 * kInstrSize); + Instr instr2 = instr_at(pc + 1 * kInstrSize); + Instr instr3 = instr_at(pc + 2 * kInstrSize); + int32_t imm; + Address ori_offset; + if (IsNal(instr2)) { + instr2 = instr3; + ori_offset = 2 * kInstrSize; + } else { + ori_offset = 1 * kInstrSize; + } + DCHECK(IsOri(instr2)); + imm = GetLuiOriImmediate(instr1, instr2); + instr1 &= ~kImm16Mask; + instr2 &= ~kImm16Mask; + + if (imm == kEndOfJumpChain) { + return; + } + imm -= pc_delta; + DCHECK_EQ(imm & 3, 0); + PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2, ori_offset); + return; + } else { + UNREACHABLE(); + } +} + +void Assembler::GrowBuffer() { + // Compute new buffer size. + int old_size = buffer_->size(); + int new_size = std::min(2 * old_size, old_size + 1 * MB); + + // Some internal data structures overflow for very large buffers, + // they must ensure that kMaximalBufferSize is not too large. + if (new_size > kMaximalBufferSize) { + V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer"); + } + + // Set up new buffer. + std::unique_ptr new_buffer = buffer_->Grow(new_size); + DCHECK_EQ(new_size, new_buffer->size()); + byte* new_start = new_buffer->start(); + + // Copy the data. + int pc_delta = new_start - buffer_start_; + int rc_delta = (new_start + new_size) - (buffer_start_ + old_size); + size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos(); + MemMove(new_start, buffer_start_, pc_offset()); + MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), + reloc_size); + + // Switch buffers. + buffer_ = std::move(new_buffer); + buffer_start_ = new_start; + pc_ += pc_delta; + pc_for_safepoint_ += pc_delta; + reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, + reloc_info_writer.last_pc() + pc_delta); + + // Relocate runtime entries. 
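+  // The code has just been copied to a new backing store, so absolute
+  // internal references embedded in the instruction stream still point into
+  // the old buffer; walk the relocation info and shift them by pc_delta.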
+  base::Vector<byte> instructions{buffer_start_,
+                                  static_cast<size_t>(pc_offset())};
+  base::Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
+  for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
+    RelocInfo::Mode rmode = it.rinfo()->rmode();
+    if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
+        rmode == RelocInfo::INTERNAL_REFERENCE) {
+      RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
+    }
+  }
+
+  DCHECK(!overflow());
+}
+
+void Assembler::db(uint8_t data) {
+  CheckForEmitInForbiddenSlot();
+  *reinterpret_cast<uint8_t*>(pc_) = data;
+  pc_ += sizeof(uint8_t);
+}
+
+void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
+  CheckForEmitInForbiddenSlot();
+  if (!RelocInfo::IsNoInfo(rmode)) {
+    DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+           RelocInfo::IsLiteralConstant(rmode));
+    RecordRelocInfo(rmode);
+  }
+  *reinterpret_cast<uint32_t*>(pc_) = data;
+  pc_ += sizeof(uint32_t);
+}
+
+void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
+  CheckForEmitInForbiddenSlot();
+  if (!RelocInfo::IsNoInfo(rmode)) {
+    DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+           RelocInfo::IsLiteralConstant(rmode));
+    RecordRelocInfo(rmode);
+  }
+  *reinterpret_cast<uint64_t*>(pc_) = data;
+  pc_ += sizeof(uint64_t);
+}
+
+void Assembler::dd(Label* label) {
+  uint32_t data;
+  CheckForEmitInForbiddenSlot();
+  if (label->is_bound()) {
+    data = reinterpret_cast<uint32_t>(buffer_start_ + label->pos());
+  } else {
+    data = jump_address(label);
+    unbound_labels_count_++;
+    internal_reference_positions_.insert(label->pos());
+  }
+  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+  EmitHelper(data);
+}
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  if (!ShouldRecordRelocInfo(rmode)) return;
+  // We do not try to reuse pool constants.
+  RelocInfo rinfo(reinterpret_cast<Address>
(pc_), rmode, data, Code()); + DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here. + reloc_info_writer.Write(&rinfo); +} + +void Assembler::BlockTrampolinePoolFor(int instructions) { + CheckTrampolinePoolQuick(instructions); + BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); +} + +void Assembler::CheckTrampolinePool() { + // Some small sequences of instructions must not be broken up by the + // insertion of a trampoline pool; such sequences are protected by setting + // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, + // which are both checked here. Also, recursive calls to CheckTrampolinePool + // are blocked by trampoline_pool_blocked_nesting_. + if ((trampoline_pool_blocked_nesting_ > 0) || + (pc_offset() < no_trampoline_pool_before_)) { + // Emission is currently blocked; make sure we try again as soon as + // possible. + if (trampoline_pool_blocked_nesting_ > 0) { + next_buffer_check_ = pc_offset() + kInstrSize; + } else { + next_buffer_check_ = no_trampoline_pool_before_; + } + return; + } + + DCHECK(!trampoline_emitted_); + DCHECK_GE(unbound_labels_count_, 0); + if (unbound_labels_count_ > 0) { + // First we emit jump (2 instructions), then we emit trampoline pool. + { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label after_pool; + if (IsMipsArchVariant(kMips32r6)) { + bc(&after_pool); + } else { + b(&after_pool); + } + nop(); + + int pool_start = pc_offset(); + for (int i = 0; i < unbound_labels_count_; i++) { + { + if (IsMipsArchVariant(kMips32r6)) { + bc(&after_pool); + nop(); + } else { + GenPCRelativeJump(t8, t9, 0, RelocInfo::NO_INFO, + BranchDelaySlot::PROTECT); + } + } + } + // If unbound_labels_count_ is big enough, label after_pool will + // need a trampoline too, so we must create the trampoline before + // the bind operation to make sure function 'bind' can get this + // information. + trampoline_ = Trampoline(pool_start, unbound_labels_count_); + bind(&after_pool); + + trampoline_emitted_ = true; + // As we are only going to emit trampoline once, we need to prevent any + // further emission. + next_buffer_check_ = kMaxInt; + } + } else { + // Number of branches to unbound label at this point is zero, so we can + // move next buffer check to maximum. + next_buffer_check_ = + pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16; + } + return; +} + +Address Assembler::target_address_at(Address pc) { + Instr instr1 = instr_at(pc); + Instr instr2 = instr_at(pc + kInstrSize); + Instr instr3 = instr_at(pc + 2 * kInstrSize); + // Interpret 2 instructions generated by li (lui/ori) or optimized pairs + // lui/jic, aui/jic or lui/jialc. + if (IsLui(instr1)) { + if (IsOri(instr2)) { + Address target_address; + // Assemble the 32 bit value. + target_address = GetLuiOriImmediate(instr1, instr2); + if (IsAddu(instr3, t9, ra, t9)) { + target_address += pc + kRelativeJumpForBuiltinsOffset; + } + return target_address; + } else if (IsJicOrJialc(instr2)) { + // Assemble the 32 bit value. + return static_cast
(CreateTargetAddress(instr1, instr2)); + } else if (IsNal(instr2)) { + DCHECK(IsOri(instr3)); + Address target_address; + target_address = GetLuiOriImmediate(instr1, instr3); + return target_address + pc + kRelativeCallForBuiltinsOffset; + } + } + + // We should never get here, force a bad address if we do. + UNREACHABLE(); +} + +// On Mips, a target address is stored in a lui/ori instruction pair, each +// of which load 16 bits of the 32-bit address to a register. +// Patching the address must replace both instr, and flush the i-cache. +// On r6, target address is stored in a lui/jic pair, and both instr have to be +// patched. +void Assembler::set_target_value_at(Address pc, uint32_t target, + ICacheFlushMode icache_flush_mode) { + Instr instr1 = instr_at(pc); + Instr instr2 = instr_at(pc + kInstrSize); + +#ifdef DEBUG + // Check we have the result from a li macro-instruction, using instr pair. + DCHECK(IsLui(instr1) && + (IsOri(instr2) || IsJicOrJialc(instr2) || IsNal(instr2))); +#endif + + if (IsJicOrJialc(instr2)) { + // Must use 2 instructions to insure patchable code => use lui and jic + uint32_t lui_offset, jic_offset; + Assembler::UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset); + + instr1 &= ~kImm16Mask; + instr2 &= ~kImm16Mask; + + instr1 |= lui_offset; + instr2 |= jic_offset; + + instr_at_put(pc, instr1); + instr_at_put(pc + kInstrSize, instr2); + } else { + Instr instr3 = instr_at(pc + 2 * kInstrSize); + // If we are using relative calls/jumps for builtins. + if (IsNal(instr2)) { + target -= pc + kRelativeCallForBuiltinsOffset; + } + if (IsAddu(instr3, t9, ra, t9)) { + target -= pc + kRelativeJumpForBuiltinsOffset; + } + // Must use 2 instructions to insure patchable code => just use lui and ori. + // lui rt, upper-16. + // ori rt rt, lower-16. + if (IsNal(instr2)) { + instr1 &= ~kImm16Mask; + instr3 &= ~kImm16Mask; + PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr3, + 2 * kInstrSize); + } else { + instr1 &= ~kImm16Mask; + instr2 &= ~kImm16Mask; + PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr2, + 1 * kInstrSize); + } + } + + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + FlushInstructionCache(pc, 2 * sizeof(int32_t)); + } +} + +void Assembler::GenPCRelativeJump(Register tf, Register ts, int32_t imm32, + RelocInfo::Mode rmode, + BranchDelaySlot bdslot) { + // Order of these instructions is relied upon when patching them + // or when changing imm32 that lui/ori pair loads. + or_(tf, ra, zero_reg); + nal(); // Relative place of nal instruction determines kLongBranchPCOffset. + if (!RelocInfo::IsNoInfo(rmode)) { + RecordRelocInfo(rmode); + } + lui(ts, (imm32 & kHiMask) >> kLuiShift); + ori(ts, ts, (imm32 & kImm16Mask)); + addu(ts, ra, ts); + if (bdslot == USE_DELAY_SLOT) { + or_(ra, tf, zero_reg); + } + jr(ts); + if (bdslot == PROTECT) { + or_(ra, tf, zero_reg); + } +} + +void Assembler::GenPCRelativeJumpAndLink(Register t, int32_t imm32, + RelocInfo::Mode rmode, + BranchDelaySlot bdslot) { + if (!RelocInfo::IsNoInfo(rmode)) { + RecordRelocInfo(rmode); + } + // Order of these instructions is relied upon when patching them + // or when changing imm32 that lui/ori pair loads. + lui(t, (imm32 & kHiMask) >> kLuiShift); + nal(); // Relative place of nal instruction determines kLongBranchPCOffset. 
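+  // After nal(), ra holds its return address: the instruction two slots past
+  // nal (the addu below), i.e. kLongBranchPCOffset bytes past the lui that
+  // starts this sequence. imm32 is therefore an offset relative to that ra
+  // value, and addu(t, ra, t) turns it into the absolute call target.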
+ ori(t, t, (imm32 & kImm16Mask)); + addu(t, ra, t); + jalr(t); + if (bdslot == PROTECT) nop(); + set_pc_for_safepoint(); +} + +UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler) + : available_(assembler->GetScratchRegisterList()), + old_available_(*available_) {} + +UseScratchRegisterScope::~UseScratchRegisterScope() { + *available_ = old_available_; +} + +Register UseScratchRegisterScope::Acquire() { + DCHECK_NOT_NULL(available_); + return available_->PopFirst(); +} + +bool UseScratchRegisterScope::hasAvailable() const { + return !available_->is_empty(); +} + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_MIPS diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h new file mode 100644 index 00000000000000..628a8bc652caaa --- /dev/null +++ b/deps/v8/src/codegen/mips/assembler-mips.h @@ -0,0 +1,1924 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2012 the V8 project authors. All rights reserved. + +#ifndef V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_ +#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_ + +#include +#include + +#include + +#include "src/codegen/assembler.h" +#include "src/codegen/external-reference.h" +#include "src/codegen/label.h" +#include "src/codegen/mips/constants-mips.h" +#include "src/codegen/mips/register-mips.h" +#include "src/objects/smi.h" + +namespace v8 { +namespace internal { + +class SafepointTableBuilder; + +// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls. +enum BranchDelaySlot { USE_DELAY_SLOT, PROTECT }; + +// ----------------------------------------------------------------------------- +// Machine instruction Operands. + +// Class Operand represents a shifter operand in data processing instructions. +class Operand { + public: + // Immediate. 
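+  // An Operand is either a plain register or an immediate value, where the
+  // immediate may carry a RelocInfo mode or a pending heap-object request;
+  // is_reg() and IsImmediate() below distinguish the two cases.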
+ V8_INLINE explicit Operand(int32_t immediate, + RelocInfo::Mode rmode = RelocInfo::NO_INFO) + : rm_(no_reg), rmode_(rmode) { + value_.immediate = immediate; + } + V8_INLINE explicit Operand(const ExternalReference& f) + : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) { + value_.immediate = static_cast(f.address()); + } + V8_INLINE explicit Operand(const char* s); + explicit Operand(Handle handle); + V8_INLINE explicit Operand(Smi value) + : rm_(no_reg), rmode_(RelocInfo::NO_INFO) { + value_.immediate = static_cast(value.ptr()); + } + + static Operand EmbeddedNumber(double number); // Smi or HeapNumber. + static Operand EmbeddedStringConstant(const StringConstantBase* str); + + // Register. + V8_INLINE explicit Operand(Register rm) : rm_(rm) {} + + // Return true if this is a register operand. + V8_INLINE bool is_reg() const; + + inline int32_t immediate() const; + + bool IsImmediate() const { return !rm_.is_valid(); } + + HeapObjectRequest heap_object_request() const { + DCHECK(IsHeapObjectRequest()); + return value_.heap_object_request; + } + + bool IsHeapObjectRequest() const { + DCHECK_IMPLIES(is_heap_object_request_, IsImmediate()); + DCHECK_IMPLIES(is_heap_object_request_, + rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT || + rmode_ == RelocInfo::CODE_TARGET); + return is_heap_object_request_; + } + + Register rm() const { return rm_; } + + RelocInfo::Mode rmode() const { return rmode_; } + + private: + Register rm_; + union Value { + Value() {} + HeapObjectRequest heap_object_request; // if is_heap_object_request_ + int32_t immediate; // otherwise + } value_; // valid if rm_ == no_reg + bool is_heap_object_request_ = false; + RelocInfo::Mode rmode_; + + friend class Assembler; + // friend class MacroAssembler; +}; + +// On MIPS we have only one addressing mode with base_reg + offset. +// Class MemOperand represents a memory operand in load and store instructions. +class V8_EXPORT_PRIVATE MemOperand : public Operand { + public: + // Immediate value attached to offset. + enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 }; + + explicit MemOperand(Register rn, int32_t offset = 0); + explicit MemOperand(Register rn, int32_t unit, int32_t multiplier, + OffsetAddend offset_addend = offset_zero); + int32_t offset() const { return offset_; } + + bool OffsetIsInt16Encodable() const { return is_int16(offset_); } + + private: + int32_t offset_; + + friend class Assembler; +}; + +class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { + public: + // Create an assembler. Instructions and relocation information are emitted + // into a buffer, with the instructions starting from the beginning and the + // relocation information starting from the end of the buffer. See CodeDesc + // for a detailed comment on the layout (globals.h). + // + // If the provided buffer is nullptr, the assembler allocates and grows its + // own buffer. Otherwise it takes ownership of the provided buffer. + explicit Assembler(const AssemblerOptions&, + std::unique_ptr = {}); + + virtual ~Assembler() {} + + // GetCode emits any pending (non-emitted) code and fills the descriptor desc. + static constexpr int kNoHandlerTable = 0; + static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr; + void GetCode(Isolate* isolate, CodeDesc* desc, + SafepointTableBuilder* safepoint_table_builder, + int handler_table_offset); + + // Convenience wrapper for code without safepoint or handler tables. 
+ void GetCode(Isolate* isolate, CodeDesc* desc) { + GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable); + } + + // Unused on this architecture. + void MaybeEmitOutOfLineConstantPool() {} + + // Mips uses BlockTrampolinePool to prevent generating trampoline inside a + // continuous instruction block. For Call instrution, it prevents generating + // trampoline between jalr and delay slot instruction. In the destructor of + // BlockTrampolinePool, it must check if it needs to generate trampoline + // immediately, if it does not do this, the branch range will go beyond the + // max branch offset, that means the pc_offset after call CheckTrampolinePool + // may have changed. So we use pc_for_safepoint_ here for safepoint record. + int pc_offset_for_safepoint() { + return static_cast(pc_for_safepoint_ - buffer_start_); + } + + // Label operations & relative jumps (PPUM Appendix D). + // + // Takes a branch opcode (cc) and a label (L) and generates + // either a backward branch or a forward branch and links it + // to the label fixup chain. Usage: + // + // Label L; // unbound label + // j(cc, &L); // forward branch to unbound label + // bind(&L); // bind label to the current pc + // j(cc, &L); // backward branch to bound label + // bind(&L); // illegal: a label may be bound only once + // + // Note: The same Label can be used for forward and backward branches + // but it may be bound only once. + void bind(Label* L); // Binds an unbound label L to current code position. + + enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 }; + + // Determines if Label is bound and near enough so that branch instruction + // can be used to reach it, instead of jump instruction. + bool is_near(Label* L); + bool is_near(Label* L, OffsetSize bits); + bool is_near_branch(Label* L); + inline bool is_near_pre_r6(Label* L) { + DCHECK(!IsMipsArchVariant(kMips32r6)); + return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize; + } + inline bool is_near_r6(Label* L) { + DCHECK(IsMipsArchVariant(kMips32r6)); + return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize; + } + + int BranchOffset(Instr instr); + + // Returns the branch offset to the given label from the current code + // position. Links the label to the current position if it is still unbound. + // Manages the jump elimination optimization if the second parameter is true. + int32_t branch_offset_helper(Label* L, OffsetSize bits); + inline int32_t branch_offset(Label* L) { + return branch_offset_helper(L, OffsetSize::kOffset16); + } + inline int32_t branch_offset21(Label* L) { + return branch_offset_helper(L, OffsetSize::kOffset21); + } + inline int32_t branch_offset26(Label* L) { + return branch_offset_helper(L, OffsetSize::kOffset26); + } + inline int32_t shifted_branch_offset(Label* L) { + return branch_offset(L) >> 2; + } + inline int32_t shifted_branch_offset21(Label* L) { + return branch_offset21(L) >> 2; + } + inline int32_t shifted_branch_offset26(Label* L) { + return branch_offset26(L) >> 2; + } + uint32_t jump_address(Label* L); + uint32_t branch_long_offset(Label* L); + + // Puts a labels target address at the given position. + // The high 8 bits are set to zero. + void label_at_put(Label* L, int at_offset); + + // Read/Modify the code target address in the branch/call instruction at pc. + // The isolate argument is unused (and may be nullptr) when skipping flushing. 
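+  // On MIPS the 32-bit target is split across a lui/ori (or, on r6, a
+  // lui/jic) instruction pair, so reading or patching a target always
+  // decodes or rewrites both instructions (see set_target_value_at in the
+  // .cc file).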
+ static Address target_address_at(Address pc); + V8_INLINE static void set_target_address_at( + Address pc, Address target, + ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) { + set_target_value_at(pc, static_cast(target), icache_flush_mode); + } + // On MIPS there is no Constant Pool so we skip that parameter. + V8_INLINE static Address target_address_at(Address pc, + Address constant_pool) { + return target_address_at(pc); + } + V8_INLINE static void set_target_address_at( + Address pc, Address constant_pool, Address target, + ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) { + set_target_address_at(pc, target, icache_flush_mode); + } + + static void set_target_value_at( + Address pc, uint32_t target, + ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); + + // This sets the branch destination (which gets loaded at the call address). + // This is for calls and branches within generated code. The serializer + // has already deserialized the lui/ori instructions etc. + inline static void deserialization_set_special_target_at( + Address instruction_payload, Code code, Address target); + + // Get the size of the special target encoded at 'instruction_payload'. + inline static int deserialization_special_target_size( + Address instruction_payload); + + // This sets the internal reference at the pc. + inline static void deserialization_set_target_internal_reference_at( + Address pc, Address target, + RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE); + + // Difference between address of current opcode and target address offset. + static constexpr int kBranchPCOffset = kInstrSize; + + // Difference between address of current opcode and target address offset, + // when we are generatinga sequence of instructions for long relative PC + // branches. It is distance between address of the first instruction in + // the jump sequence, and the value that ra gets after calling nal(). + static constexpr int kLongBranchPCOffset = 3 * kInstrSize; + + // Adjust ra register in branch delay slot of bal instruction in order to skip + // instructions not needed after optimization of PIC in + // TurboAssembler::BranchAndLink method. + static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 3 * kInstrSize; + + // Offset of target relative address in calls/jumps for builtins. It is + // distance between instruction that is placed just after calling + // RecordRelocInfo, and the value that ra gets aftr calling nal(). + static constexpr int kRelativeJumpForBuiltinsOffset = 1 * kInstrSize; + // Relative target address of jumps for builtins when we use lui, ori, dsll, + // ori sequence when loading address that cannot fit into 32 bits. + static constexpr int kRelativeCallForBuiltinsOffset = 3 * kInstrSize; + + // Here we are patching the address in the LUI/ORI instruction pair. + // These values are used in the serialization process and must be zero for + // MIPS platform, as Code, Embedded Object or External-reference pointers + // are split across two consecutive instructions and don't exist separately + // in the code, so the serializer should not step forwards in memory after + // a target is resolved and written. + + static constexpr int kSpecialTargetSize = 0; + + // Number of consecutive instructions used to store 32bit constant. This + // constant is used in RelocInfo::target_address_address() function to tell + // serializer address of the instruction that follows LUI/ORI instruction + // pair. 
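+  // Illustrative example: loading the 32-bit constant 0x12345678 takes
+  // exactly this pair of instructions:
+  //   lui  rt, 0x1234        // rt = 0x12340000
+  //   ori  rt, rt, 0x5678    // rt = 0x12345678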
+ static constexpr int kInstructionsFor32BitConstant = 2; + + // Max offset for instructions with 16-bit offset field + static constexpr int kMaxBranchOffset = (1 << (18 - 1)) - 1; + + // Max offset for compact branch instructions with 26-bit offset field + static constexpr int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1; + + static constexpr int kTrampolineSlotsSize = + IsMipsArchVariant(kMips32r6) ? 2 * kInstrSize : 7 * kInstrSize; + + RegList* GetScratchRegisterList() { return &scratch_register_list_; } + + // --------------------------------------------------------------------------- + // Code generation. + + // Insert the smallest number of nop instructions + // possible to align the pc offset to a multiple + // of m. m must be a power of 2 (>= 4). + void Align(int m); + // Insert the smallest number of zero bytes possible to align the pc offset + // to a mulitple of m. m must be a power of 2 (>= 2). + void DataAlign(int m); + // Aligns code to something that's optimal for a jump target for the platform. + void CodeTargetAlign(); + void LoopHeaderAlign() { CodeTargetAlign(); } + + // Different nop operations are used by the code generator to detect certain + // states of the generated code. + enum NopMarkerTypes { + NON_MARKING_NOP = 0, + DEBUG_BREAK_NOP, + // IC markers. + PROPERTY_ACCESS_INLINED, + PROPERTY_ACCESS_INLINED_CONTEXT, + PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, + // Helper values. + LAST_CODE_MARKER, + FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, + }; + + // Type == 0 is the default non-marking nop. For mips this is a + // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero + // marking, to avoid conflict with ssnop and ehb instructions. + void nop(unsigned int type = 0) { + DCHECK_LT(type, 32); + Register nop_rt_reg = (type == 0) ? zero_reg : at; + sll(zero_reg, nop_rt_reg, type, true); + } + + // --------Branch-and-jump-instructions---------- + // We don't use likely variant of instructions. 
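+  // The raw offset parameters below are word (instruction) offsets relative
+  // to the instruction following the branch, not byte offsets; the Label*
+  // overloads compute them via the shifted_branch_offset helpers, which
+  // divide the byte distance by 4.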
+ void b(int16_t offset); + inline void b(Label* L) { b(shifted_branch_offset(L)); } + void bal(int16_t offset); + inline void bal(Label* L) { bal(shifted_branch_offset(L)); } + void bc(int32_t offset); + inline void bc(Label* L) { bc(shifted_branch_offset26(L)); } + void balc(int32_t offset); + inline void balc(Label* L) { balc(shifted_branch_offset26(L)); } + + void beq(Register rs, Register rt, int16_t offset); + inline void beq(Register rs, Register rt, Label* L) { + beq(rs, rt, shifted_branch_offset(L)); + } + void bgez(Register rs, int16_t offset); + void bgezc(Register rt, int16_t offset); + inline void bgezc(Register rt, Label* L) { + bgezc(rt, shifted_branch_offset(L)); + } + void bgeuc(Register rs, Register rt, int16_t offset); + inline void bgeuc(Register rs, Register rt, Label* L) { + bgeuc(rs, rt, shifted_branch_offset(L)); + } + void bgec(Register rs, Register rt, int16_t offset); + inline void bgec(Register rs, Register rt, Label* L) { + bgec(rs, rt, shifted_branch_offset(L)); + } + void bgezal(Register rs, int16_t offset); + void bgezalc(Register rt, int16_t offset); + inline void bgezalc(Register rt, Label* L) { + bgezalc(rt, shifted_branch_offset(L)); + } + void bgezall(Register rs, int16_t offset); + inline void bgezall(Register rs, Label* L) { + bgezall(rs, branch_offset(L) >> 2); + } + void bgtz(Register rs, int16_t offset); + void bgtzc(Register rt, int16_t offset); + inline void bgtzc(Register rt, Label* L) { + bgtzc(rt, shifted_branch_offset(L)); + } + void blez(Register rs, int16_t offset); + void blezc(Register rt, int16_t offset); + inline void blezc(Register rt, Label* L) { + blezc(rt, shifted_branch_offset(L)); + } + void bltz(Register rs, int16_t offset); + void bltzc(Register rt, int16_t offset); + inline void bltzc(Register rt, Label* L) { + bltzc(rt, shifted_branch_offset(L)); + } + void bltuc(Register rs, Register rt, int16_t offset); + inline void bltuc(Register rs, Register rt, Label* L) { + bltuc(rs, rt, shifted_branch_offset(L)); + } + void bltc(Register rs, Register rt, int16_t offset); + inline void bltc(Register rs, Register rt, Label* L) { + bltc(rs, rt, shifted_branch_offset(L)); + } + void bltzal(Register rs, int16_t offset); + void nal() { bltzal(zero_reg, 0); } + void blezalc(Register rt, int16_t offset); + inline void blezalc(Register rt, Label* L) { + blezalc(rt, shifted_branch_offset(L)); + } + void bltzalc(Register rt, int16_t offset); + inline void bltzalc(Register rt, Label* L) { + bltzalc(rt, shifted_branch_offset(L)); + } + void bgtzalc(Register rt, int16_t offset); + inline void bgtzalc(Register rt, Label* L) { + bgtzalc(rt, shifted_branch_offset(L)); + } + void beqzalc(Register rt, int16_t offset); + inline void beqzalc(Register rt, Label* L) { + beqzalc(rt, shifted_branch_offset(L)); + } + void beqc(Register rs, Register rt, int16_t offset); + inline void beqc(Register rs, Register rt, Label* L) { + beqc(rs, rt, shifted_branch_offset(L)); + } + void beqzc(Register rs, int32_t offset); + inline void beqzc(Register rs, Label* L) { + beqzc(rs, shifted_branch_offset21(L)); + } + void bnezalc(Register rt, int16_t offset); + inline void bnezalc(Register rt, Label* L) { + bnezalc(rt, shifted_branch_offset(L)); + } + void bnec(Register rs, Register rt, int16_t offset); + inline void bnec(Register rs, Register rt, Label* L) { + bnec(rs, rt, shifted_branch_offset(L)); + } + void bnezc(Register rt, int32_t offset); + inline void bnezc(Register rt, Label* L) { + bnezc(rt, shifted_branch_offset21(L)); + } + void bne(Register rs, Register rt, 
int16_t offset); + inline void bne(Register rs, Register rt, Label* L) { + bne(rs, rt, shifted_branch_offset(L)); + } + void bovc(Register rs, Register rt, int16_t offset); + inline void bovc(Register rs, Register rt, Label* L) { + bovc(rs, rt, shifted_branch_offset(L)); + } + void bnvc(Register rs, Register rt, int16_t offset); + inline void bnvc(Register rs, Register rt, Label* L) { + bnvc(rs, rt, shifted_branch_offset(L)); + } + + // Never use the int16_t b(l)cond version with a branch offset + // instead of using the Label* version. + + // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits. + void j(int32_t target); + void jal(int32_t target); + void jalr(Register rs, Register rd = ra); + void jr(Register target); + void jic(Register rt, int16_t offset); + void jialc(Register rt, int16_t offset); + + // -------Data-processing-instructions--------- + + // Arithmetic. + void addu(Register rd, Register rs, Register rt); + void subu(Register rd, Register rs, Register rt); + void mult(Register rs, Register rt); + void multu(Register rs, Register rt); + void div(Register rs, Register rt); + void divu(Register rs, Register rt); + void div(Register rd, Register rs, Register rt); + void divu(Register rd, Register rs, Register rt); + void mod(Register rd, Register rs, Register rt); + void modu(Register rd, Register rs, Register rt); + void mul(Register rd, Register rs, Register rt); + void muh(Register rd, Register rs, Register rt); + void mulu(Register rd, Register rs, Register rt); + void muhu(Register rd, Register rs, Register rt); + + void addiu(Register rd, Register rs, int32_t j); + + // Logical. + void and_(Register rd, Register rs, Register rt); + void or_(Register rd, Register rs, Register rt); + void xor_(Register rd, Register rs, Register rt); + void nor(Register rd, Register rs, Register rt); + + void andi(Register rd, Register rs, int32_t j); + void ori(Register rd, Register rs, int32_t j); + void xori(Register rd, Register rs, int32_t j); + void lui(Register rd, int32_t j); + void aui(Register rs, Register rt, int32_t j); + + // Shifts. + // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop + // and may cause problems in normal code. coming_from_nop makes sure this + // doesn't happen. 
+ void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false); + void sllv(Register rd, Register rt, Register rs); + void srl(Register rd, Register rt, uint16_t sa); + void srlv(Register rd, Register rt, Register rs); + void sra(Register rt, Register rd, uint16_t sa); + void srav(Register rt, Register rd, Register rs); + void rotr(Register rd, Register rt, uint16_t sa); + void rotrv(Register rd, Register rt, Register rs); + + // ------------Memory-instructions------------- + + void lb(Register rd, const MemOperand& rs); + void lbu(Register rd, const MemOperand& rs); + void lh(Register rd, const MemOperand& rs); + void lhu(Register rd, const MemOperand& rs); + void lw(Register rd, const MemOperand& rs); + void lwl(Register rd, const MemOperand& rs); + void lwr(Register rd, const MemOperand& rs); + void sb(Register rd, const MemOperand& rs); + void sh(Register rd, const MemOperand& rs); + void sw(Register rd, const MemOperand& rs); + void swl(Register rd, const MemOperand& rs); + void swr(Register rd, const MemOperand& rs); + + // ----------Atomic instructions-------------- + + void ll(Register rd, const MemOperand& rs); + void sc(Register rd, const MemOperand& rs); + void llx(Register rd, const MemOperand& rs); + void scx(Register rd, const MemOperand& rs); + + // ---------PC-Relative-instructions----------- + + void addiupc(Register rs, int32_t imm19); + void lwpc(Register rs, int32_t offset19); + void auipc(Register rs, int16_t imm16); + void aluipc(Register rs, int16_t imm16); + + // ----------------Prefetch-------------------- + + void pref(int32_t hint, const MemOperand& rs); + + // -------------Misc-instructions-------------- + + // Break / Trap instructions. + void break_(uint32_t code, bool break_as_stop = false); + void stop(uint32_t code = kMaxStopCode); + void tge(Register rs, Register rt, uint16_t code); + void tgeu(Register rs, Register rt, uint16_t code); + void tlt(Register rs, Register rt, uint16_t code); + void tltu(Register rs, Register rt, uint16_t code); + void teq(Register rs, Register rt, uint16_t code); + void tne(Register rs, Register rt, uint16_t code); + + // Memory barrier instruction. + void sync(); + + // Move from HI/LO register. + void mfhi(Register rd); + void mflo(Register rd); + + // Set on less than. + void slt(Register rd, Register rs, Register rt); + void sltu(Register rd, Register rs, Register rt); + void slti(Register rd, Register rs, int32_t j); + void sltiu(Register rd, Register rs, int32_t j); + + // Conditional move. 
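+  // movz/movn and the condition-code based movt/movf variants are pre-r6
+  // instructions; on MIPS32r6 the sel/seleqz/selnez family below is used
+  // instead (the emitters in the .cc file DCHECK the architecture variant).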
+ void movz(Register rd, Register rs, Register rt); + void movn(Register rd, Register rs, Register rt); + void movt(Register rd, Register rs, uint16_t cc = 0); + void movf(Register rd, Register rs, uint16_t cc = 0); + + void sel(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft); + void sel_s(FPURegister fd, FPURegister fs, FPURegister ft); + void sel_d(FPURegister fd, FPURegister fs, FPURegister ft); + void seleqz(Register rd, Register rs, Register rt); + void seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs, + FPURegister ft); + void selnez(Register rd, Register rs, Register rt); + void selnez(SecondaryField fmt, FPURegister fd, FPURegister fs, + FPURegister ft); + void seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft); + void seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft); + void selnez_d(FPURegister fd, FPURegister fs, FPURegister ft); + void selnez_s(FPURegister fd, FPURegister fs, FPURegister ft); + + void movz_s(FPURegister fd, FPURegister fs, Register rt); + void movz_d(FPURegister fd, FPURegister fs, Register rt); + void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0); + void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0); + void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0); + void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0); + void movn_s(FPURegister fd, FPURegister fs, Register rt); + void movn_d(FPURegister fd, FPURegister fs, Register rt); + // Bit twiddling. + void clz(Register rd, Register rs); + void ins_(Register rt, Register rs, uint16_t pos, uint16_t size); + void ext_(Register rt, Register rs, uint16_t pos, uint16_t size); + void bitswap(Register rd, Register rt); + void align(Register rd, Register rs, Register rt, uint8_t bp); + + void wsbh(Register rd, Register rt); + void seh(Register rd, Register rt); + void seb(Register rd, Register rt); + + // --------Coprocessor-instructions---------------- + + // Load, store, and move. + void lwc1(FPURegister fd, const MemOperand& src); + void swc1(FPURegister fs, const MemOperand& dst); + + void mtc1(Register rt, FPURegister fs); + void mthc1(Register rt, FPURegister fs); + + void mfc1(Register rt, FPURegister fs); + void mfhc1(Register rt, FPURegister fs); + + void ctc1(Register rt, FPUControlRegister fs); + void cfc1(Register rt, FPUControlRegister fs); + + // Arithmetic. 
+ void add_s(FPURegister fd, FPURegister fs, FPURegister ft); + void add_d(FPURegister fd, FPURegister fs, FPURegister ft); + void sub_s(FPURegister fd, FPURegister fs, FPURegister ft); + void sub_d(FPURegister fd, FPURegister fs, FPURegister ft); + void mul_s(FPURegister fd, FPURegister fs, FPURegister ft); + void mul_d(FPURegister fd, FPURegister fs, FPURegister ft); + void madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft); + void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft); + void msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft); + void msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft); + void maddf_s(FPURegister fd, FPURegister fs, FPURegister ft); + void maddf_d(FPURegister fd, FPURegister fs, FPURegister ft); + void msubf_s(FPURegister fd, FPURegister fs, FPURegister ft); + void msubf_d(FPURegister fd, FPURegister fs, FPURegister ft); + void div_s(FPURegister fd, FPURegister fs, FPURegister ft); + void div_d(FPURegister fd, FPURegister fs, FPURegister ft); + void abs_s(FPURegister fd, FPURegister fs); + void abs_d(FPURegister fd, FPURegister fs); + void mov_d(FPURegister fd, FPURegister fs); + void mov_s(FPURegister fd, FPURegister fs); + void neg_s(FPURegister fd, FPURegister fs); + void neg_d(FPURegister fd, FPURegister fs); + void sqrt_s(FPURegister fd, FPURegister fs); + void sqrt_d(FPURegister fd, FPURegister fs); + void rsqrt_s(FPURegister fd, FPURegister fs); + void rsqrt_d(FPURegister fd, FPURegister fs); + void recip_d(FPURegister fd, FPURegister fs); + void recip_s(FPURegister fd, FPURegister fs); + + // Conversion. + void cvt_w_s(FPURegister fd, FPURegister fs); + void cvt_w_d(FPURegister fd, FPURegister fs); + void trunc_w_s(FPURegister fd, FPURegister fs); + void trunc_w_d(FPURegister fd, FPURegister fs); + void round_w_s(FPURegister fd, FPURegister fs); + void round_w_d(FPURegister fd, FPURegister fs); + void floor_w_s(FPURegister fd, FPURegister fs); + void floor_w_d(FPURegister fd, FPURegister fs); + void ceil_w_s(FPURegister fd, FPURegister fs); + void ceil_w_d(FPURegister fd, FPURegister fs); + void rint_s(FPURegister fd, FPURegister fs); + void rint_d(FPURegister fd, FPURegister fs); + void rint(SecondaryField fmt, FPURegister fd, FPURegister fs); + + void cvt_l_s(FPURegister fd, FPURegister fs); + void cvt_l_d(FPURegister fd, FPURegister fs); + void trunc_l_s(FPURegister fd, FPURegister fs); + void trunc_l_d(FPURegister fd, FPURegister fs); + void round_l_s(FPURegister fd, FPURegister fs); + void round_l_d(FPURegister fd, FPURegister fs); + void floor_l_s(FPURegister fd, FPURegister fs); + void floor_l_d(FPURegister fd, FPURegister fs); + void ceil_l_s(FPURegister fd, FPURegister fs); + void ceil_l_d(FPURegister fd, FPURegister fs); + + void class_s(FPURegister fd, FPURegister fs); + void class_d(FPURegister fd, FPURegister fs); + + void min(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft); + void mina(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft); + void max(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft); + void maxa(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft); + void min_s(FPURegister fd, FPURegister fs, FPURegister ft); + void min_d(FPURegister fd, FPURegister fs, FPURegister ft); + void max_s(FPURegister fd, FPURegister fs, FPURegister ft); + void max_d(FPURegister fd, FPURegister fs, FPURegister ft); + void mina_s(FPURegister fd, FPURegister fs, FPURegister ft); + void 
mina_d(FPURegister fd, FPURegister fs, FPURegister ft); + void maxa_s(FPURegister fd, FPURegister fs, FPURegister ft); + void maxa_d(FPURegister fd, FPURegister fs, FPURegister ft); + + void cvt_s_w(FPURegister fd, FPURegister fs); + void cvt_s_l(FPURegister fd, FPURegister fs); + void cvt_s_d(FPURegister fd, FPURegister fs); + + void cvt_d_w(FPURegister fd, FPURegister fs); + void cvt_d_l(FPURegister fd, FPURegister fs); + void cvt_d_s(FPURegister fd, FPURegister fs); + + // Conditions and branches for MIPSr6. + void cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd, + FPURegister ft, FPURegister fs); + void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft); + void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft); + + void bc1eqz(int16_t offset, FPURegister ft); + inline void bc1eqz(Label* L, FPURegister ft) { + bc1eqz(shifted_branch_offset(L), ft); + } + void bc1nez(int16_t offset, FPURegister ft); + inline void bc1nez(Label* L, FPURegister ft) { + bc1nez(shifted_branch_offset(L), ft); + } + + // Conditions and branches for non MIPSr6. + void c(FPUCondition cond, SecondaryField fmt, FPURegister ft, FPURegister fs, + uint16_t cc = 0); + void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0); + void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0); + + void bc1f(int16_t offset, uint16_t cc = 0); + inline void bc1f(Label* L, uint16_t cc = 0) { + bc1f(shifted_branch_offset(L), cc); + } + void bc1t(int16_t offset, uint16_t cc = 0); + inline void bc1t(Label* L, uint16_t cc = 0) { + bc1t(shifted_branch_offset(L), cc); + } + void fcmp(FPURegister src1, const double src2, FPUCondition cond); + + // MSA instructions + void bz_v(MSARegister wt, int16_t offset); + inline void bz_v(MSARegister wt, Label* L) { + bz_v(wt, shifted_branch_offset(L)); + } + void bz_b(MSARegister wt, int16_t offset); + inline void bz_b(MSARegister wt, Label* L) { + bz_b(wt, shifted_branch_offset(L)); + } + void bz_h(MSARegister wt, int16_t offset); + inline void bz_h(MSARegister wt, Label* L) { + bz_h(wt, shifted_branch_offset(L)); + } + void bz_w(MSARegister wt, int16_t offset); + inline void bz_w(MSARegister wt, Label* L) { + bz_w(wt, shifted_branch_offset(L)); + } + void bz_d(MSARegister wt, int16_t offset); + inline void bz_d(MSARegister wt, Label* L) { + bz_d(wt, shifted_branch_offset(L)); + } + void bnz_v(MSARegister wt, int16_t offset); + inline void bnz_v(MSARegister wt, Label* L) { + bnz_v(wt, shifted_branch_offset(L)); + } + void bnz_b(MSARegister wt, int16_t offset); + inline void bnz_b(MSARegister wt, Label* L) { + bnz_b(wt, shifted_branch_offset(L)); + } + void bnz_h(MSARegister wt, int16_t offset); + inline void bnz_h(MSARegister wt, Label* L) { + bnz_h(wt, shifted_branch_offset(L)); + } + void bnz_w(MSARegister wt, int16_t offset); + inline void bnz_w(MSARegister wt, Label* L) { + bnz_w(wt, shifted_branch_offset(L)); + } + void bnz_d(MSARegister wt, int16_t offset); + inline void bnz_d(MSARegister wt, Label* L) { + bnz_d(wt, shifted_branch_offset(L)); + } + + void ld_b(MSARegister wd, const MemOperand& rs); + void ld_h(MSARegister wd, const MemOperand& rs); + void ld_w(MSARegister wd, const MemOperand& rs); + void ld_d(MSARegister wd, const MemOperand& rs); + void st_b(MSARegister wd, const MemOperand& rs); + void st_h(MSARegister wd, const MemOperand& rs); + void st_w(MSARegister wd, const MemOperand& rs); + void st_d(MSARegister wd, const MemOperand& rs); + + void ldi_b(MSARegister wd, int32_t imm10); + void 
ldi_h(MSARegister wd, int32_t imm10); + void ldi_w(MSARegister wd, int32_t imm10); + void ldi_d(MSARegister wd, int32_t imm10); + + void addvi_b(MSARegister wd, MSARegister ws, uint32_t imm5); + void addvi_h(MSARegister wd, MSARegister ws, uint32_t imm5); + void addvi_w(MSARegister wd, MSARegister ws, uint32_t imm5); + void addvi_d(MSARegister wd, MSARegister ws, uint32_t imm5); + void subvi_b(MSARegister wd, MSARegister ws, uint32_t imm5); + void subvi_h(MSARegister wd, MSARegister ws, uint32_t imm5); + void subvi_w(MSARegister wd, MSARegister ws, uint32_t imm5); + void subvi_d(MSARegister wd, MSARegister ws, uint32_t imm5); + void maxi_s_b(MSARegister wd, MSARegister ws, uint32_t imm5); + void maxi_s_h(MSARegister wd, MSARegister ws, uint32_t imm5); + void maxi_s_w(MSARegister wd, MSARegister ws, uint32_t imm5); + void maxi_s_d(MSARegister wd, MSARegister ws, uint32_t imm5); + void maxi_u_b(MSARegister wd, MSARegister ws, uint32_t imm5); + void maxi_u_h(MSARegister wd, MSARegister ws, uint32_t imm5); + void maxi_u_w(MSARegister wd, MSARegister ws, uint32_t imm5); + void maxi_u_d(MSARegister wd, MSARegister ws, uint32_t imm5); + void mini_s_b(MSARegister wd, MSARegister ws, uint32_t imm5); + void mini_s_h(MSARegister wd, MSARegister ws, uint32_t imm5); + void mini_s_w(MSARegister wd, MSARegister ws, uint32_t imm5); + void mini_s_d(MSARegister wd, MSARegister ws, uint32_t imm5); + void mini_u_b(MSARegister wd, MSARegister ws, uint32_t imm5); + void mini_u_h(MSARegister wd, MSARegister ws, uint32_t imm5); + void mini_u_w(MSARegister wd, MSARegister ws, uint32_t imm5); + void mini_u_d(MSARegister wd, MSARegister ws, uint32_t imm5); + void ceqi_b(MSARegister wd, MSARegister ws, uint32_t imm5); + void ceqi_h(MSARegister wd, MSARegister ws, uint32_t imm5); + void ceqi_w(MSARegister wd, MSARegister ws, uint32_t imm5); + void ceqi_d(MSARegister wd, MSARegister ws, uint32_t imm5); + void clti_s_b(MSARegister wd, MSARegister ws, uint32_t imm5); + void clti_s_h(MSARegister wd, MSARegister ws, uint32_t imm5); + void clti_s_w(MSARegister wd, MSARegister ws, uint32_t imm5); + void clti_s_d(MSARegister wd, MSARegister ws, uint32_t imm5); + void clti_u_b(MSARegister wd, MSARegister ws, uint32_t imm5); + void clti_u_h(MSARegister wd, MSARegister ws, uint32_t imm5); + void clti_u_w(MSARegister wd, MSARegister ws, uint32_t imm5); + void clti_u_d(MSARegister wd, MSARegister ws, uint32_t imm5); + void clei_s_b(MSARegister wd, MSARegister ws, uint32_t imm5); + void clei_s_h(MSARegister wd, MSARegister ws, uint32_t imm5); + void clei_s_w(MSARegister wd, MSARegister ws, uint32_t imm5); + void clei_s_d(MSARegister wd, MSARegister ws, uint32_t imm5); + void clei_u_b(MSARegister wd, MSARegister ws, uint32_t imm5); + void clei_u_h(MSARegister wd, MSARegister ws, uint32_t imm5); + void clei_u_w(MSARegister wd, MSARegister ws, uint32_t imm5); + void clei_u_d(MSARegister wd, MSARegister ws, uint32_t imm5); + + void andi_b(MSARegister wd, MSARegister ws, uint32_t imm8); + void ori_b(MSARegister wd, MSARegister ws, uint32_t imm8); + void nori_b(MSARegister wd, MSARegister ws, uint32_t imm8); + void xori_b(MSARegister wd, MSARegister ws, uint32_t imm8); + void bmnzi_b(MSARegister wd, MSARegister ws, uint32_t imm8); + void bmzi_b(MSARegister wd, MSARegister ws, uint32_t imm8); + void bseli_b(MSARegister wd, MSARegister ws, uint32_t imm8); + void shf_b(MSARegister wd, MSARegister ws, uint32_t imm8); + void shf_h(MSARegister wd, MSARegister ws, uint32_t imm8); + void shf_w(MSARegister wd, MSARegister ws, uint32_t imm8); + + 
void and_v(MSARegister wd, MSARegister ws, MSARegister wt); + void or_v(MSARegister wd, MSARegister ws, MSARegister wt); + void nor_v(MSARegister wd, MSARegister ws, MSARegister wt); + void xor_v(MSARegister wd, MSARegister ws, MSARegister wt); + void bmnz_v(MSARegister wd, MSARegister ws, MSARegister wt); + void bmz_v(MSARegister wd, MSARegister ws, MSARegister wt); + void bsel_v(MSARegister wd, MSARegister ws, MSARegister wt); + + void fill_b(MSARegister wd, Register rs); + void fill_h(MSARegister wd, Register rs); + void fill_w(MSARegister wd, Register rs); + void pcnt_b(MSARegister wd, MSARegister ws); + void pcnt_h(MSARegister wd, MSARegister ws); + void pcnt_w(MSARegister wd, MSARegister ws); + void pcnt_d(MSARegister wd, MSARegister ws); + void nloc_b(MSARegister wd, MSARegister ws); + void nloc_h(MSARegister wd, MSARegister ws); + void nloc_w(MSARegister wd, MSARegister ws); + void nloc_d(MSARegister wd, MSARegister ws); + void nlzc_b(MSARegister wd, MSARegister ws); + void nlzc_h(MSARegister wd, MSARegister ws); + void nlzc_w(MSARegister wd, MSARegister ws); + void nlzc_d(MSARegister wd, MSARegister ws); + + void fclass_w(MSARegister wd, MSARegister ws); + void fclass_d(MSARegister wd, MSARegister ws); + void ftrunc_s_w(MSARegister wd, MSARegister ws); + void ftrunc_s_d(MSARegister wd, MSARegister ws); + void ftrunc_u_w(MSARegister wd, MSARegister ws); + void ftrunc_u_d(MSARegister wd, MSARegister ws); + void fsqrt_w(MSARegister wd, MSARegister ws); + void fsqrt_d(MSARegister wd, MSARegister ws); + void frsqrt_w(MSARegister wd, MSARegister ws); + void frsqrt_d(MSARegister wd, MSARegister ws); + void frcp_w(MSARegister wd, MSARegister ws); + void frcp_d(MSARegister wd, MSARegister ws); + void frint_w(MSARegister wd, MSARegister ws); + void frint_d(MSARegister wd, MSARegister ws); + void flog2_w(MSARegister wd, MSARegister ws); + void flog2_d(MSARegister wd, MSARegister ws); + void fexupl_w(MSARegister wd, MSARegister ws); + void fexupl_d(MSARegister wd, MSARegister ws); + void fexupr_w(MSARegister wd, MSARegister ws); + void fexupr_d(MSARegister wd, MSARegister ws); + void ffql_w(MSARegister wd, MSARegister ws); + void ffql_d(MSARegister wd, MSARegister ws); + void ffqr_w(MSARegister wd, MSARegister ws); + void ffqr_d(MSARegister wd, MSARegister ws); + void ftint_s_w(MSARegister wd, MSARegister ws); + void ftint_s_d(MSARegister wd, MSARegister ws); + void ftint_u_w(MSARegister wd, MSARegister ws); + void ftint_u_d(MSARegister wd, MSARegister ws); + void ffint_s_w(MSARegister wd, MSARegister ws); + void ffint_s_d(MSARegister wd, MSARegister ws); + void ffint_u_w(MSARegister wd, MSARegister ws); + void ffint_u_d(MSARegister wd, MSARegister ws); + + void sll_b(MSARegister wd, MSARegister ws, MSARegister wt); + void sll_h(MSARegister wd, MSARegister ws, MSARegister wt); + void sll_w(MSARegister wd, MSARegister ws, MSARegister wt); + void sll_d(MSARegister wd, MSARegister ws, MSARegister wt); + void sra_b(MSARegister wd, MSARegister ws, MSARegister wt); + void sra_h(MSARegister wd, MSARegister ws, MSARegister wt); + void sra_w(MSARegister wd, MSARegister ws, MSARegister wt); + void sra_d(MSARegister wd, MSARegister ws, MSARegister wt); + void srl_b(MSARegister wd, MSARegister ws, MSARegister wt); + void srl_h(MSARegister wd, MSARegister ws, MSARegister wt); + void srl_w(MSARegister wd, MSARegister ws, MSARegister wt); + void srl_d(MSARegister wd, MSARegister ws, MSARegister wt); + void bclr_b(MSARegister wd, MSARegister ws, MSARegister wt); + void bclr_h(MSARegister wd, MSARegister ws, 
MSARegister wt); + void bclr_w(MSARegister wd, MSARegister ws, MSARegister wt); + void bclr_d(MSARegister wd, MSARegister ws, MSARegister wt); + void bset_b(MSARegister wd, MSARegister ws, MSARegister wt); + void bset_h(MSARegister wd, MSARegister ws, MSARegister wt); + void bset_w(MSARegister wd, MSARegister ws, MSARegister wt); + void bset_d(MSARegister wd, MSARegister ws, MSARegister wt); + void bneg_b(MSARegister wd, MSARegister ws, MSARegister wt); + void bneg_h(MSARegister wd, MSARegister ws, MSARegister wt); + void bneg_w(MSARegister wd, MSARegister ws, MSARegister wt); + void bneg_d(MSARegister wd, MSARegister ws, MSARegister wt); + void binsl_b(MSARegister wd, MSARegister ws, MSARegister wt); + void binsl_h(MSARegister wd, MSARegister ws, MSARegister wt); + void binsl_w(MSARegister wd, MSARegister ws, MSARegister wt); + void binsl_d(MSARegister wd, MSARegister ws, MSARegister wt); + void binsr_b(MSARegister wd, MSARegister ws, MSARegister wt); + void binsr_h(MSARegister wd, MSARegister ws, MSARegister wt); + void binsr_w(MSARegister wd, MSARegister ws, MSARegister wt); + void binsr_d(MSARegister wd, MSARegister ws, MSARegister wt); + void addv_b(MSARegister wd, MSARegister ws, MSARegister wt); + void addv_h(MSARegister wd, MSARegister ws, MSARegister wt); + void addv_w(MSARegister wd, MSARegister ws, MSARegister wt); + void addv_d(MSARegister wd, MSARegister ws, MSARegister wt); + void subv_b(MSARegister wd, MSARegister ws, MSARegister wt); + void subv_h(MSARegister wd, MSARegister ws, MSARegister wt); + void subv_w(MSARegister wd, MSARegister ws, MSARegister wt); + void subv_d(MSARegister wd, MSARegister ws, MSARegister wt); + void max_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void max_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void max_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void max_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void max_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void max_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void max_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void max_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void min_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void min_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void min_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void min_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void min_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void min_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void min_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void min_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void max_a_b(MSARegister wd, MSARegister ws, MSARegister wt); + void max_a_h(MSARegister wd, MSARegister ws, MSARegister wt); + void max_a_w(MSARegister wd, MSARegister ws, MSARegister wt); + void max_a_d(MSARegister wd, MSARegister ws, MSARegister wt); + void min_a_b(MSARegister wd, MSARegister ws, MSARegister wt); + void min_a_h(MSARegister wd, MSARegister ws, MSARegister wt); + void min_a_w(MSARegister wd, MSARegister ws, MSARegister wt); + void min_a_d(MSARegister wd, MSARegister ws, MSARegister wt); + void ceq_b(MSARegister wd, MSARegister ws, MSARegister wt); + void ceq_h(MSARegister wd, MSARegister ws, MSARegister wt); + void ceq_w(MSARegister wd, MSARegister ws, MSARegister wt); + void ceq_d(MSARegister wd, MSARegister ws, MSARegister wt); + void clt_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void clt_s_h(MSARegister wd, MSARegister 
ws, MSARegister wt); + void clt_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void clt_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void clt_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void clt_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void clt_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void clt_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void cle_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void cle_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void cle_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void cle_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void cle_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void cle_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void cle_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void cle_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void add_a_b(MSARegister wd, MSARegister ws, MSARegister wt); + void add_a_h(MSARegister wd, MSARegister ws, MSARegister wt); + void add_a_w(MSARegister wd, MSARegister ws, MSARegister wt); + void add_a_d(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_a_b(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_a_h(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_a_w(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_a_d(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void adds_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void ave_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void ave_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void ave_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void ave_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void ave_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void ave_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void ave_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void ave_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void aver_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void aver_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void aver_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void aver_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void aver_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void aver_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void aver_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void aver_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void subs_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void subs_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void subs_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void subs_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void subs_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void subs_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void subs_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void subs_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void subsus_u_b(MSARegister wd, MSARegister ws, 
MSARegister wt); + void subsus_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void subsus_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void subsus_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void subsus_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void subsus_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void subsus_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void subsus_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void subsuu_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void subsuu_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void subsuu_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void subsuu_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void subsuu_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void subsuu_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void subsuu_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void subsuu_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void asub_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void asub_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void asub_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void asub_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void asub_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void asub_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void asub_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void asub_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void mulv_b(MSARegister wd, MSARegister ws, MSARegister wt); + void mulv_h(MSARegister wd, MSARegister ws, MSARegister wt); + void mulv_w(MSARegister wd, MSARegister ws, MSARegister wt); + void mulv_d(MSARegister wd, MSARegister ws, MSARegister wt); + void maddv_b(MSARegister wd, MSARegister ws, MSARegister wt); + void maddv_h(MSARegister wd, MSARegister ws, MSARegister wt); + void maddv_w(MSARegister wd, MSARegister ws, MSARegister wt); + void maddv_d(MSARegister wd, MSARegister ws, MSARegister wt); + void msubv_b(MSARegister wd, MSARegister ws, MSARegister wt); + void msubv_h(MSARegister wd, MSARegister ws, MSARegister wt); + void msubv_w(MSARegister wd, MSARegister ws, MSARegister wt); + void msubv_d(MSARegister wd, MSARegister ws, MSARegister wt); + void div_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void div_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void div_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void div_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void div_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void div_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void div_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void div_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void mod_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void mod_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void mod_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void mod_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void mod_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void mod_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void mod_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void mod_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void dotp_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void dotp_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void dotp_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void dotp_s_d(MSARegister 
wd, MSARegister ws, MSARegister wt); + void dotp_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void dotp_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void dotp_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void dotp_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void dpadd_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void dpadd_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void dpadd_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void dpadd_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void dpadd_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void dpadd_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void dpadd_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void dpadd_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void dpsub_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void dpsub_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void dpsub_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void dpsub_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void dpsub_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void dpsub_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void dpsub_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void dpsub_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void sld_b(MSARegister wd, MSARegister ws, Register rt); + void sld_h(MSARegister wd, MSARegister ws, Register rt); + void sld_w(MSARegister wd, MSARegister ws, Register rt); + void sld_d(MSARegister wd, MSARegister ws, Register rt); + void splat_b(MSARegister wd, MSARegister ws, Register rt); + void splat_h(MSARegister wd, MSARegister ws, Register rt); + void splat_w(MSARegister wd, MSARegister ws, Register rt); + void splat_d(MSARegister wd, MSARegister ws, Register rt); + void pckev_b(MSARegister wd, MSARegister ws, MSARegister wt); + void pckev_h(MSARegister wd, MSARegister ws, MSARegister wt); + void pckev_w(MSARegister wd, MSARegister ws, MSARegister wt); + void pckev_d(MSARegister wd, MSARegister ws, MSARegister wt); + void pckod_b(MSARegister wd, MSARegister ws, MSARegister wt); + void pckod_h(MSARegister wd, MSARegister ws, MSARegister wt); + void pckod_w(MSARegister wd, MSARegister ws, MSARegister wt); + void pckod_d(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvl_b(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvl_h(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvl_w(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvl_d(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvr_b(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvr_h(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvr_w(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvr_d(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvev_b(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvev_h(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvev_w(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvev_d(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvod_b(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvod_h(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvod_w(MSARegister wd, MSARegister ws, MSARegister wt); + void ilvod_d(MSARegister wd, MSARegister ws, MSARegister wt); + void vshf_b(MSARegister wd, MSARegister ws, MSARegister wt); + void vshf_h(MSARegister wd, MSARegister ws, MSARegister wt); + void vshf_w(MSARegister wd, MSARegister ws, MSARegister wt); + 
void vshf_d(MSARegister wd, MSARegister ws, MSARegister wt); + void srar_b(MSARegister wd, MSARegister ws, MSARegister wt); + void srar_h(MSARegister wd, MSARegister ws, MSARegister wt); + void srar_w(MSARegister wd, MSARegister ws, MSARegister wt); + void srar_d(MSARegister wd, MSARegister ws, MSARegister wt); + void srlr_b(MSARegister wd, MSARegister ws, MSARegister wt); + void srlr_h(MSARegister wd, MSARegister ws, MSARegister wt); + void srlr_w(MSARegister wd, MSARegister ws, MSARegister wt); + void srlr_d(MSARegister wd, MSARegister ws, MSARegister wt); + void hadd_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void hadd_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void hadd_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void hadd_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void hadd_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void hadd_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void hadd_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void hadd_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + void hsub_s_b(MSARegister wd, MSARegister ws, MSARegister wt); + void hsub_s_h(MSARegister wd, MSARegister ws, MSARegister wt); + void hsub_s_w(MSARegister wd, MSARegister ws, MSARegister wt); + void hsub_s_d(MSARegister wd, MSARegister ws, MSARegister wt); + void hsub_u_b(MSARegister wd, MSARegister ws, MSARegister wt); + void hsub_u_h(MSARegister wd, MSARegister ws, MSARegister wt); + void hsub_u_w(MSARegister wd, MSARegister ws, MSARegister wt); + void hsub_u_d(MSARegister wd, MSARegister ws, MSARegister wt); + + void fcaf_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fcaf_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fcun_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fcun_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fceq_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fceq_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fcueq_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fcueq_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fclt_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fclt_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fcult_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fcult_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fcle_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fcle_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fcule_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fcule_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fsaf_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fsaf_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fsun_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fsun_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fseq_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fseq_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fsueq_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fsueq_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fslt_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fslt_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fsult_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fsult_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fsle_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fsle_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fsule_w(MSARegister wd, MSARegister ws, 
MSARegister wt); + void fsule_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fadd_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fadd_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fsub_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fsub_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fmul_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fmul_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fdiv_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fdiv_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fmadd_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fmadd_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fmsub_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fmsub_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fexp2_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fexp2_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fexdo_h(MSARegister wd, MSARegister ws, MSARegister wt); + void fexdo_w(MSARegister wd, MSARegister ws, MSARegister wt); + void ftq_h(MSARegister wd, MSARegister ws, MSARegister wt); + void ftq_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fmin_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fmin_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fmin_a_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fmin_a_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fmax_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fmax_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fmax_a_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fmax_a_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fcor_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fcor_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fcune_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fcune_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fcne_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fcne_d(MSARegister wd, MSARegister ws, MSARegister wt); + void mul_q_h(MSARegister wd, MSARegister ws, MSARegister wt); + void mul_q_w(MSARegister wd, MSARegister ws, MSARegister wt); + void madd_q_h(MSARegister wd, MSARegister ws, MSARegister wt); + void madd_q_w(MSARegister wd, MSARegister ws, MSARegister wt); + void msub_q_h(MSARegister wd, MSARegister ws, MSARegister wt); + void msub_q_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fsor_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fsor_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fsune_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fsune_d(MSARegister wd, MSARegister ws, MSARegister wt); + void fsne_w(MSARegister wd, MSARegister ws, MSARegister wt); + void fsne_d(MSARegister wd, MSARegister ws, MSARegister wt); + void mulr_q_h(MSARegister wd, MSARegister ws, MSARegister wt); + void mulr_q_w(MSARegister wd, MSARegister ws, MSARegister wt); + void maddr_q_h(MSARegister wd, MSARegister ws, MSARegister wt); + void maddr_q_w(MSARegister wd, MSARegister ws, MSARegister wt); + void msubr_q_h(MSARegister wd, MSARegister ws, MSARegister wt); + void msubr_q_w(MSARegister wd, MSARegister ws, MSARegister wt); + + void sldi_b(MSARegister wd, MSARegister ws, uint32_t n); + void sldi_h(MSARegister wd, MSARegister ws, uint32_t n); + void sldi_w(MSARegister wd, MSARegister ws, uint32_t n); + void sldi_d(MSARegister wd, MSARegister ws, uint32_t n); + void splati_b(MSARegister wd, 
MSARegister ws, uint32_t n); + void splati_h(MSARegister wd, MSARegister ws, uint32_t n); + void splati_w(MSARegister wd, MSARegister ws, uint32_t n); + void splati_d(MSARegister wd, MSARegister ws, uint32_t n); + void copy_s_b(Register rd, MSARegister ws, uint32_t n); + void copy_s_h(Register rd, MSARegister ws, uint32_t n); + void copy_s_w(Register rd, MSARegister ws, uint32_t n); + void copy_u_b(Register rd, MSARegister ws, uint32_t n); + void copy_u_h(Register rd, MSARegister ws, uint32_t n); + void copy_u_w(Register rd, MSARegister ws, uint32_t n); + void insert_b(MSARegister wd, uint32_t n, Register rs); + void insert_h(MSARegister wd, uint32_t n, Register rs); + void insert_w(MSARegister wd, uint32_t n, Register rs); + void insve_b(MSARegister wd, uint32_t n, MSARegister ws); + void insve_h(MSARegister wd, uint32_t n, MSARegister ws); + void insve_w(MSARegister wd, uint32_t n, MSARegister ws); + void insve_d(MSARegister wd, uint32_t n, MSARegister ws); + void move_v(MSARegister wd, MSARegister ws); + void ctcmsa(MSAControlRegister cd, Register rs); + void cfcmsa(Register rd, MSAControlRegister cs); + + void slli_b(MSARegister wd, MSARegister ws, uint32_t m); + void slli_h(MSARegister wd, MSARegister ws, uint32_t m); + void slli_w(MSARegister wd, MSARegister ws, uint32_t m); + void slli_d(MSARegister wd, MSARegister ws, uint32_t m); + void srai_b(MSARegister wd, MSARegister ws, uint32_t m); + void srai_h(MSARegister wd, MSARegister ws, uint32_t m); + void srai_w(MSARegister wd, MSARegister ws, uint32_t m); + void srai_d(MSARegister wd, MSARegister ws, uint32_t m); + void srli_b(MSARegister wd, MSARegister ws, uint32_t m); + void srli_h(MSARegister wd, MSARegister ws, uint32_t m); + void srli_w(MSARegister wd, MSARegister ws, uint32_t m); + void srli_d(MSARegister wd, MSARegister ws, uint32_t m); + void bclri_b(MSARegister wd, MSARegister ws, uint32_t m); + void bclri_h(MSARegister wd, MSARegister ws, uint32_t m); + void bclri_w(MSARegister wd, MSARegister ws, uint32_t m); + void bclri_d(MSARegister wd, MSARegister ws, uint32_t m); + void bseti_b(MSARegister wd, MSARegister ws, uint32_t m); + void bseti_h(MSARegister wd, MSARegister ws, uint32_t m); + void bseti_w(MSARegister wd, MSARegister ws, uint32_t m); + void bseti_d(MSARegister wd, MSARegister ws, uint32_t m); + void bnegi_b(MSARegister wd, MSARegister ws, uint32_t m); + void bnegi_h(MSARegister wd, MSARegister ws, uint32_t m); + void bnegi_w(MSARegister wd, MSARegister ws, uint32_t m); + void bnegi_d(MSARegister wd, MSARegister ws, uint32_t m); + void binsli_b(MSARegister wd, MSARegister ws, uint32_t m); + void binsli_h(MSARegister wd, MSARegister ws, uint32_t m); + void binsli_w(MSARegister wd, MSARegister ws, uint32_t m); + void binsli_d(MSARegister wd, MSARegister ws, uint32_t m); + void binsri_b(MSARegister wd, MSARegister ws, uint32_t m); + void binsri_h(MSARegister wd, MSARegister ws, uint32_t m); + void binsri_w(MSARegister wd, MSARegister ws, uint32_t m); + void binsri_d(MSARegister wd, MSARegister ws, uint32_t m); + void sat_s_b(MSARegister wd, MSARegister ws, uint32_t m); + void sat_s_h(MSARegister wd, MSARegister ws, uint32_t m); + void sat_s_w(MSARegister wd, MSARegister ws, uint32_t m); + void sat_s_d(MSARegister wd, MSARegister ws, uint32_t m); + void sat_u_b(MSARegister wd, MSARegister ws, uint32_t m); + void sat_u_h(MSARegister wd, MSARegister ws, uint32_t m); + void sat_u_w(MSARegister wd, MSARegister ws, uint32_t m); + void sat_u_d(MSARegister wd, MSARegister ws, uint32_t m); + void srari_b(MSARegister wd, 
MSARegister ws, uint32_t m); + void srari_h(MSARegister wd, MSARegister ws, uint32_t m); + void srari_w(MSARegister wd, MSARegister ws, uint32_t m); + void srari_d(MSARegister wd, MSARegister ws, uint32_t m); + void srlri_b(MSARegister wd, MSARegister ws, uint32_t m); + void srlri_h(MSARegister wd, MSARegister ws, uint32_t m); + void srlri_w(MSARegister wd, MSARegister ws, uint32_t m); + void srlri_d(MSARegister wd, MSARegister ws, uint32_t m); + + // Check the code size generated from label to here. + int SizeOfCodeGeneratedSince(Label* label) { + return pc_offset() - label->pos(); + } + + // Check the number of instructions generated from label to here. + int InstructionsGeneratedSince(Label* label) { + return SizeOfCodeGeneratedSince(label) / kInstrSize; + } + + // Class for scoping the postponement of trampoline pool generation. + class V8_NODISCARD BlockTrampolinePoolScope { + public: + explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { + assem_->StartBlockTrampolinePool(); + } + ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); } + + private: + Assembler* assem_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); + }; + + // Class for postponing the assembly buffer growth. Typically used for + // sequences of instructions that must be emitted as a unit, before + // buffer growth (and relocation) can occur. + // This blocking scope is not nestable. + class V8_NODISCARD BlockGrowBufferScope { + public: + explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) { + assem_->StartBlockGrowBuffer(); + } + ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); } + + private: + Assembler* assem_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope); + }; + + // Record a deoptimization reason that can be used by a log or cpu profiler. + // Use --trace-deopt to enable. + void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id, + SourcePosition position, int id); + + static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc, + intptr_t pc_delta); + + static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc, + intptr_t pc_delta); + + // Writes a single byte or word of data in the code stream. Used for + // inline tables, e.g., jump-tables. + void db(uint8_t data); + void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO); + void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO); + void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) { + dd(data, rmode); + } + void dd(Label* label); + + // Postpone the generation of the trampoline pool for the specified number of + // instructions. + void BlockTrampolinePoolFor(int instructions); + + // Check if there are fewer than kGap bytes available in the buffer. + // If this is the case, we need to grow the buffer before emitting + // an instruction or relocation information. + inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; } + + // Get the number of bytes available in the buffer. + inline int available_space() const { return reloc_info_writer.pos() - pc_; } + + // Read/patch instructions. + static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); } + static void instr_at_put(Address pc, Instr instr) { + *reinterpret_cast<Instr*>(pc) = instr; + } + Instr instr_at(int pos) { + return *reinterpret_cast<Instr*>(buffer_start_ + pos); + } + void instr_at_put(int pos, Instr instr) { + *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr; + } + + // Check if an instruction is a branch of some kind.
+ static bool IsBranch(Instr instr); + static bool IsMsaBranch(Instr instr); + static bool IsBc(Instr instr); + static bool IsNal(Instr instr); + static bool IsBzc(Instr instr); + static bool IsBeq(Instr instr); + static bool IsBne(Instr instr); + static bool IsBeqzc(Instr instr); + static bool IsBnezc(Instr instr); + static bool IsBeqc(Instr instr); + static bool IsBnec(Instr instr); + static bool IsJicOrJialc(Instr instr); + static bool IsMov(Instr instr, Register rd, Register rs); + + static bool IsJump(Instr instr); + static bool IsJ(Instr instr); + static bool IsLui(Instr instr); + static bool IsOri(Instr instr); + static bool IsAddu(Instr instr, Register rd, Register rs, Register rt); + + static bool IsJal(Instr instr); + static bool IsJr(Instr instr); + static bool IsJalr(Instr instr); + + static bool IsNop(Instr instr, unsigned int type); + static bool IsPop(Instr instr); + static bool IsPush(Instr instr); + static bool IsLwRegFpOffset(Instr instr); + static bool IsSwRegFpOffset(Instr instr); + static bool IsLwRegFpNegOffset(Instr instr); + static bool IsSwRegFpNegOffset(Instr instr); + + static Register GetRtReg(Instr instr); + static Register GetRsReg(Instr instr); + static Register GetRdReg(Instr instr); + + static uint32_t GetRt(Instr instr); + static uint32_t GetRtField(Instr instr); + static uint32_t GetRs(Instr instr); + static uint32_t GetRsField(Instr instr); + static uint32_t GetRd(Instr instr); + static uint32_t GetRdField(Instr instr); + static uint32_t GetSa(Instr instr); + static uint32_t GetSaField(Instr instr); + static uint32_t GetOpcodeField(Instr instr); + static uint32_t GetFunction(Instr instr); + static uint32_t GetFunctionField(Instr instr); + static uint32_t GetImmediate16(Instr instr); + static uint32_t GetLabelConst(Instr instr); + + static int32_t GetBranchOffset(Instr instr); + static bool IsLw(Instr instr); + static int16_t GetLwOffset(Instr instr); + static int16_t GetJicOrJialcOffset(Instr instr); + static int16_t GetLuiOffset(Instr instr); + static Instr SetLwOffset(Instr instr, int16_t offset); + + static bool IsSw(Instr instr); + static Instr SetSwOffset(Instr instr, int16_t offset); + static bool IsAddImmediate(Instr instr); + static Instr SetAddImmediateOffset(Instr instr, int16_t offset); + static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic); + static void UnpackTargetAddress(uint32_t address, int16_t* lui_offset, + int16_t* jic_offset); + static void UnpackTargetAddressUnsigned(uint32_t address, + uint32_t* lui_offset, + uint32_t* jic_offset); + + static bool IsAndImmediate(Instr instr); + static bool IsEmittedConstant(Instr instr); + + void CheckTrampolinePool(); + + bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; } + static bool IsCompactBranchSupported() { + return IsMipsArchVariant(kMips32r6); + } + + // Get the code target object for a pc-relative call or jump. + V8_INLINE Handle<Code> relative_code_target_object_handle_at( + Address pc_) const; + + inline int UnboundLabelsCount() { return unbound_labels_count_; } + + bool is_trampoline_emitted() const { return trampoline_emitted_; } + + protected: + // Load Scaled Address instruction. + void lsa(Register rd, Register rt, Register rs, uint8_t sa); + + // Readable constants for the base and offset adjustment helper; these indicate + // whether, aside from offset, another value like offset + 4 should fit into int16.
+ enum class OffsetAccessType : bool { + SINGLE_ACCESS = false, + TWO_ACCESSES = true + }; + + // Helper function for memory load/store using base register and offset. + void AdjustBaseAndOffset( + MemOperand* src, + OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS, + int second_access_add_to_offset = 4); + + int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; } + + // Decode branch instruction at pos and return branch target pos. + int target_at(int pos, bool is_internal); + + // Patch branch instruction at pos to branch to given branch target pos. + void target_at_put(int pos, int target_pos, bool is_internal); + + // Say if we need to relocate with this mode. + bool MustUseReg(RelocInfo::Mode rmode); + + // Record reloc info for current pc_. + void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); + + // Read 32-bit immediate from lui, ori pair that is used to load immediate. + static int32_t GetLuiOriImmediate(Instr instr1, Instr instr2); + + // Block the emission of the trampoline pool before pc_offset. + void BlockTrampolinePoolBefore(int pc_offset) { + if (no_trampoline_pool_before_ < pc_offset) + no_trampoline_pool_before_ = pc_offset; + } + + void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; } + + void EndBlockTrampolinePool() { + trampoline_pool_blocked_nesting_--; + if (trampoline_pool_blocked_nesting_ == 0) { + CheckTrampolinePoolQuick(1); + } + } + + bool is_trampoline_pool_blocked() const { + return trampoline_pool_blocked_nesting_ > 0; + } + + bool has_exception() const { return internal_trampoline_exception_; } + + // Temporarily block automatic assembly buffer growth. + void StartBlockGrowBuffer() { + DCHECK(!block_buffer_growth_); + block_buffer_growth_ = true; + } + + void EndBlockGrowBuffer() { + DCHECK(block_buffer_growth_); + block_buffer_growth_ = false; + } + + bool is_buffer_growth_blocked() const { return block_buffer_growth_; } + + void EmitForbiddenSlotInstruction() { + if (IsPrevInstrCompactBranch()) { + nop(); + } + } + + inline void CheckTrampolinePoolQuick(int extra_instructions = 0) { + if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) { + CheckTrampolinePool(); + } + } + + inline void CheckBuffer(); + + RegList scratch_register_list_; + + // Generate common instruction sequence. + void GenPCRelativeJump(Register tf, Register ts, int32_t imm32, + RelocInfo::Mode rmode, BranchDelaySlot bdslot); + void GenPCRelativeJumpAndLink(Register t, int32_t imm32, + RelocInfo::Mode rmode, BranchDelaySlot bdslot); + + void set_pc_for_safepoint() { pc_for_safepoint_ = pc_; } + + private: + // Avoid overflows for displacements etc. + static const int kMaximalBufferSize = 512 * MB; + + inline static void set_target_internal_reference_encoded_at(Address pc, + Address target); + + // Buffer size and constant pool distance are checked together at regular + // intervals of kBufferCheckInterval emitted bytes. + static constexpr int kBufferCheckInterval = 1 * KB / 2; + + // Code generation. + // The relocation writer's position is at least kGap bytes below the end of + // the generated instructions. This is so that multi-instruction sequences do + // not have to check for overflow. The same is true for writes of large + // relocation info entries. + static constexpr int kGap = 32; + STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap); + + // Repeated checking whether the trampoline pool should be emitted is rather + // expensive. 
By default we only check again once a number of instructions + // has been generated. + static constexpr int kCheckConstIntervalInst = 32; + static constexpr int kCheckConstInterval = + kCheckConstIntervalInst * kInstrSize; + + int next_buffer_check_; // pc offset of next buffer check. + + // Emission of the trampoline pool may be blocked in some code sequences. + int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. + int no_trampoline_pool_before_; // Block emission before this pc offset. + + // Keep track of the last emitted pool to guarantee a maximal distance. + int last_trampoline_pool_end_; // pc offset of the end of the last pool. + + // Automatic growth of the assembly buffer may be blocked for some sequences. + bool block_buffer_growth_; // Block growth when true. + + // Relocation information generation. + // Each relocation is encoded as a variable size value. + static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize; + RelocInfoWriter reloc_info_writer; + + // The bound position, before which we cannot do instruction elimination. + int last_bound_pos_; + + // Readable constants for compact branch handling in emit(). + enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true }; + + // Code emission. + void GrowBuffer(); + inline void emit(Instr x, + CompactBranchType is_compact_branch = CompactBranchType::NO); + inline void emit(uint64_t x); + inline void CheckForEmitInForbiddenSlot(); + template <typename T> + inline void EmitHelper(T x); + inline void EmitHelper(Instr x, CompactBranchType is_compact_branch); + + // Instruction generation. + // We have 3 different kinds of encoding layout on MIPS. + // However, due to many different types of objects encoded in the same fields, + // we have quite a few aliases for each mode. + // Using the same structure to refer to Register and FPURegister would spare a + // few aliases, but mixing both does not look clean to me. + // Anyway we could surely implement this differently.
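For context, the "3 different kinds of encoding layout" mentioned in the comment above are the standard MIPS32 R-, I- and J-type instruction formats. The sketch below is purely illustrative and is not part of this patch or of V8; the EncodeRType helper is hypothetical, while the field widths, register numbers and function field come from the MIPS32 ISA.

#include <cstdint>

// MIPS32 instruction formats (field widths in bits):
//   R-type: opcode(6) | rs(5) | rt(5) | rd(5) | sa(5) | funct(6)
//   I-type: opcode(6) | rs(5) | rt(5) | immediate(16)
//   J-type: opcode(6) | target(26)
constexpr uint32_t EncodeRType(uint32_t op, uint32_t rs, uint32_t rt,
                               uint32_t rd, uint32_t sa, uint32_t funct) {
  return (op << 26) | (rs << 21) | (rt << 16) | (rd << 11) | (sa << 6) | funct;
}

// Example: "addu v0, a0, a1" (opcode SPECIAL = 0, funct = 0x21,
// v0 = register 2, a0 = register 4, a1 = register 5).
static_assert(EncodeRType(0, 4, 5, 2, 0, 0x21) == 0x00851021,
              "addu v0, a0, a1 assembles to 0x00851021");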
+ + void GenInstrRegister(Opcode opcode, Register rs, Register rt, Register rd, + uint16_t sa = 0, SecondaryField func = nullptrSF); + + void GenInstrRegister(Opcode opcode, Register rs, Register rt, uint16_t msb, + uint16_t lsb, SecondaryField func); + + void GenInstrRegister(Opcode opcode, SecondaryField fmt, FPURegister ft, + FPURegister fs, FPURegister fd, + SecondaryField func = nullptrSF); + + void GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft, + FPURegister fs, FPURegister fd, + SecondaryField func = nullptrSF); + + void GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt, + FPURegister fs, FPURegister fd, + SecondaryField func = nullptrSF); + + void GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt, + FPUControlRegister fs, SecondaryField func = nullptrSF); + + void GenInstrImmediate( + Opcode opcode, Register rs, Register rt, int32_t j, + CompactBranchType is_compact_branch = CompactBranchType::NO); + void GenInstrImmediate( + Opcode opcode, Register rs, SecondaryField SF, int32_t j, + CompactBranchType is_compact_branch = CompactBranchType::NO); + void GenInstrImmediate( + Opcode opcode, Register r1, FPURegister r2, int32_t j, + CompactBranchType is_compact_branch = CompactBranchType::NO); + void GenInstrImmediate(Opcode opcode, Register base, Register rt, + int32_t offset9, int bit6, SecondaryField func); + void GenInstrImmediate( + Opcode opcode, Register rs, int32_t offset21, + CompactBranchType is_compact_branch = CompactBranchType::NO); + void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21); + void GenInstrImmediate( + Opcode opcode, int32_t offset26, + CompactBranchType is_compact_branch = CompactBranchType::NO); + + void GenInstrJump(Opcode opcode, uint32_t address); + + // MSA + void GenInstrMsaI8(SecondaryField operation, uint32_t imm8, MSARegister ws, + MSARegister wd); + + void GenInstrMsaI5(SecondaryField operation, SecondaryField df, int32_t imm5, + MSARegister ws, MSARegister wd); + + void GenInstrMsaBit(SecondaryField operation, SecondaryField df, uint32_t m, + MSARegister ws, MSARegister wd); + + void GenInstrMsaI10(SecondaryField operation, SecondaryField df, + int32_t imm10, MSARegister wd); + + template <typename RegType> + void GenInstrMsa3R(SecondaryField operation, SecondaryField df, RegType t, + MSARegister ws, MSARegister wd); + + template <typename SrcType, typename DstType> + void GenInstrMsaElm(SecondaryField operation, SecondaryField df, uint32_t n, + SrcType src, DstType dst); + + void GenInstrMsa3RF(SecondaryField operation, uint32_t df, MSARegister wt, + MSARegister ws, MSARegister wd); + + void GenInstrMsaVec(SecondaryField operation, MSARegister wt, MSARegister ws, + MSARegister wd); + + void GenInstrMsaMI10(SecondaryField operation, int32_t s10, Register rs, + MSARegister wd); + + void GenInstrMsa2R(SecondaryField operation, SecondaryField df, + MSARegister ws, MSARegister wd); + + void GenInstrMsa2RF(SecondaryField operation, SecondaryField df, + MSARegister ws, MSARegister wd); + + void GenInstrMsaBranch(SecondaryField operation, MSARegister wt, + int32_t offset16); + + inline bool is_valid_msa_df_m(SecondaryField bit_df, uint32_t m) { + switch (bit_df) { + case BIT_DF_b: + return is_uint3(m); + case BIT_DF_h: + return is_uint4(m); + case BIT_DF_w: + return is_uint5(m); + case BIT_DF_d: + return is_uint6(m); + default: + return false; + } + } + + inline bool is_valid_msa_df_n(SecondaryField elm_df, uint32_t n) { + switch (elm_df) { + case ELM_DF_B: + return is_uint4(n); + case ELM_DF_H: + return is_uint3(n); + case ELM_DF_W: + return
is_uint2(n); + case ELM_DF_D: + return is_uint1(n); + default: + return false; + } + } + + // Labels. + void print(const Label* L); + void bind_to(Label* L, int pos); + void next(Label* L, bool is_internal); + + // Patching lui/ori pair which is commonly used for loading constants. + static void PatchLuiOriImmediate(Address pc, int32_t imm, Instr instr1, + Address offset_lui, Instr instr2, + Address offset_ori); + void PatchLuiOriImmediate(int pc, int32_t imm, Instr instr1, + Address offset_lui, Instr instr2, + Address offset_ori); + + // One trampoline consists of: + // - space for trampoline slots, + // - space for labels. + // + // Space for trampoline slots is equal to slot_count * 2 * kInstrSize. + // Space for trampoline slots precedes space for labels. Each label is of one + // instruction size, so total amount for labels is equal to + // label_count * kInstrSize. + class Trampoline { + public: + Trampoline() { + start_ = 0; + next_slot_ = 0; + free_slot_count_ = 0; + end_ = 0; + } + Trampoline(int start, int slot_count) { + start_ = start; + next_slot_ = start; + free_slot_count_ = slot_count; + end_ = start + slot_count * kTrampolineSlotsSize; + } + int start() { return start_; } + int end() { return end_; } + int take_slot() { + int trampoline_slot = kInvalidSlotPos; + if (free_slot_count_ <= 0) { + // We have run out of space on trampolines. + // Make sure we fail in debug mode, so we become aware of each case + // when this happens. + DCHECK(0); + // Internal exception will be caught. + } else { + trampoline_slot = next_slot_; + free_slot_count_--; + next_slot_ += kTrampolineSlotsSize; + } + return trampoline_slot; + } + + private: + int start_; + int end_; + int next_slot_; + int free_slot_count_; + }; + + int32_t get_trampoline_entry(int32_t pos); + int unbound_labels_count_; + // If trampoline is emitted, generated code is becoming large. As this is + // already a slow case which can possibly break our code generation for the + // extreme case, we use this information to trigger different mode of + // branch instruction generation, where we use jump instructions rather + // than regular branch instructions. + bool trampoline_emitted_; + static constexpr int kInvalidSlotPos = -1; + + // Internal reference positions, required for unbounded internal reference + // labels. + std::set internal_reference_positions_; + bool is_internal_reference(Label* L) { + return internal_reference_positions_.find(L->pos()) != + internal_reference_positions_.end(); + } + + void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; } + void ClearCompactBranchState() { prev_instr_compact_branch_ = false; } + bool prev_instr_compact_branch_ = false; + + Trampoline trampoline_; + bool internal_trampoline_exception_; + + // Keep track of the last Call's position to ensure that safepoint can get the + // correct information even if there is a trampoline immediately after the + // Call. 
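// Illustrative sketch (not part of the patch): why the trampoline machinery
// above exists. A MIPS branch immediate holds 16 signed bits of instruction
// words (shifted left by kImmFieldShift == 2), so a branch reaches only about
// +/-128 KB; the helper name below is hypothetical.
static constexpr bool FitsInBranchRange(int32_t byte_offset) {
  return byte_offset >= -(1 << 17) && byte_offset <= (1 << 17) - 4;
}
// When a label lies outside that range the assembler routes the branch through
// a nearby trampoline slot, or switches to jump instructions once
// trampoline_emitted_ is set. (pc_for_safepoint_, declared next, records the
// last Call's pc as described in the comment above.)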
+ byte* pc_for_safepoint_; + + private: + void AllocateAndInstallRequestedHeapObjects(Isolate* isolate); + + int WriteCodeComments(); + + friend class RegExpMacroAssemblerMIPS; + friend class RelocInfo; + friend class BlockTrampolinePoolScope; + friend class EnsureSpace; +}; + +class EnsureSpace { + public: + explicit V8_INLINE EnsureSpace(Assembler* assembler); +}; + +class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope { + public: + explicit UseScratchRegisterScope(Assembler* assembler); + ~UseScratchRegisterScope(); + + Register Acquire(); + bool hasAvailable() const; + + void Include(const RegList& list) { *available_ |= list; } + void Exclude(const RegList& list) { available_->clear(list); } + void Include(const Register& reg1, const Register& reg2 = no_reg) { + RegList list({reg1, reg2}); + Include(list); + } + void Exclude(const Register& reg1, const Register& reg2 = no_reg) { + RegList list({reg1, reg2}); + Exclude(list); + } + + private: + RegList* available_; + RegList old_available_; +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_ diff --git a/deps/v8/src/codegen/mips/constants-mips.cc b/deps/v8/src/codegen/mips/constants-mips.cc new file mode 100644 index 00000000000000..44113870603303 --- /dev/null +++ b/deps/v8/src/codegen/mips/constants-mips.cc @@ -0,0 +1,144 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#if V8_TARGET_ARCH_MIPS + +#include "src/codegen/mips/constants-mips.h" + +namespace v8 { +namespace internal { + +// ----------------------------------------------------------------------------- +// Registers. + +// These register names are defined in a way to match the native disassembler +// formatting. See for example the command "objdump -d ". +const char* Registers::names_[kNumSimuRegisters] = { + "zero_reg", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0", + "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", + "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", + "k1", "gp", "sp", "fp", "ra", "LO", "HI", "pc"}; + +// List of alias names which can be used when referring to MIPS registers. +const Registers::RegisterAlias Registers::aliases_[] = { + {0, "zero"}, + {23, "cp"}, + {30, "s8"}, + {30, "s8_fp"}, + {kInvalidRegister, nullptr}}; + +const char* Registers::Name(int reg) { + const char* result; + if ((0 <= reg) && (reg < kNumSimuRegisters)) { + result = names_[reg]; + } else { + result = "noreg"; + } + return result; +} + +int Registers::Number(const char* name) { + // Look through the canonical names. + for (int i = 0; i < kNumSimuRegisters; i++) { + if (strcmp(names_[i], name) == 0) { + return i; + } + } + + // Look through the alias names. + int i = 0; + while (aliases_[i].reg != kInvalidRegister) { + if (strcmp(aliases_[i].name, name) == 0) { + return aliases_[i].reg; + } + i++; + } + + // No register with the reguested name found. + return kInvalidRegister; +} + +const char* FPURegisters::names_[kNumFPURegisters] = { + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", + "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", + "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"}; + +// List of alias names which can be used when referring to MIPS registers. 
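// Usage sketch (not part of the patch): how the lookup helpers above behave,
// given the names_ and aliases_ tables. The function name is hypothetical.
inline void RegisterNameLookupExample() {
  int sp = Registers::Number("sp");           // 29, a canonical name
  int zero = Registers::Number("zero");       // 0, resolved via the alias table
  int s8 = Registers::Number("s8");           // 30, alias of fp
  const char* pc_name = Registers::Name(34);  // "pc"
  const char* invalid = Registers::Name(64);  // "noreg" for out-of-range
  (void)sp; (void)zero; (void)s8; (void)pc_name; (void)invalid;
}
// (The FPU register alias table defined next uses the same {number, name}
// entries, terminated by {kInvalidRegister, nullptr}.)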
+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = { + {kInvalidRegister, nullptr}}; + +const char* FPURegisters::Name(int creg) { + const char* result; + if ((0 <= creg) && (creg < kNumFPURegisters)) { + result = names_[creg]; + } else { + result = "nocreg"; + } + return result; +} + +int FPURegisters::Number(const char* name) { + // Look through the canonical names. + for (int i = 0; i < kNumFPURegisters; i++) { + if (strcmp(names_[i], name) == 0) { + return i; + } + } + + // Look through the alias names. + int i = 0; + while (aliases_[i].creg != kInvalidRegister) { + if (strcmp(aliases_[i].name, name) == 0) { + return aliases_[i].creg; + } + i++; + } + + // No Cregister with the reguested name found. + return kInvalidFPURegister; +} + +const char* MSARegisters::names_[kNumMSARegisters] = { + "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", + "w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", + "w22", "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "w31"}; + +const MSARegisters::RegisterAlias MSARegisters::aliases_[] = { + {kInvalidRegister, nullptr}}; + +const char* MSARegisters::Name(int creg) { + const char* result; + if ((0 <= creg) && (creg < kNumMSARegisters)) { + result = names_[creg]; + } else { + result = "nocreg"; + } + return result; +} + +int MSARegisters::Number(const char* name) { + // Look through the canonical names. + for (int i = 0; i < kNumMSARegisters; i++) { + if (strcmp(names_[i], name) == 0) { + return i; + } + } + + // Look through the alias names. + int i = 0; + while (aliases_[i].creg != kInvalidRegister) { + if (strcmp(aliases_[i].name, name) == 0) { + return aliases_[i].creg; + } + i++; + } + + // No Cregister with the reguested name found. + return kInvalidMSARegister; +} + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_MIPS diff --git a/deps/v8/src/codegen/mips/constants-mips.h b/deps/v8/src/codegen/mips/constants-mips.h new file mode 100644 index 00000000000000..5ed338e3e02d37 --- /dev/null +++ b/deps/v8/src/codegen/mips/constants-mips.h @@ -0,0 +1,1924 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_ +#define V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_ +#include "src/codegen/cpu-features.h" +// UNIMPLEMENTED_ macro for MIPS. +#ifdef DEBUG +#define UNIMPLEMENTED_MIPS() \ + v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \ + __FILE__, __LINE__, __func__) +#else +#define UNIMPLEMENTED_MIPS() +#endif + +#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n") + +enum ArchVariants { + kMips32r1 = v8::internal::MIPSr1, + kMips32r2 = v8::internal::MIPSr2, + kMips32r6 = v8::internal::MIPSr6, + kLoongson +}; + +#ifdef _MIPS_ARCH_MIPS32R2 +static const ArchVariants kArchVariant = kMips32r2; +#elif _MIPS_ARCH_MIPS32R6 +static const ArchVariants kArchVariant = kMips32r6; +#elif _MIPS_ARCH_LOONGSON +// The loongson flag refers to the LOONGSON architectures based on MIPS-III, +// which predates (and is a subset of) the mips32r2 and r1 architectures. +static const ArchVariants kArchVariant = kLoongson; +#elif _MIPS_ARCH_MIPS32RX +// This flags referred to compatibility mode that creates universal code that +// can run on any MIPS32 architecture revision. The dynamically generated code +// by v8 is specialized for the MIPS host detected in runtime probing. 
+static const ArchVariants kArchVariant = kMips32r1; +#else +static const ArchVariants kArchVariant = kMips32r1; +#endif + +enum Endianness { kLittle, kBig }; + +#if defined(V8_TARGET_LITTLE_ENDIAN) +static const Endianness kArchEndian = kLittle; +#elif defined(V8_TARGET_BIG_ENDIAN) +static const Endianness kArchEndian = kBig; +#else +#error Unknown endianness +#endif + +enum FpuMode { kFP32, kFP64, kFPXX }; + +#if defined(FPU_MODE_FP32) +static const FpuMode kFpuMode = kFP32; +#elif defined(FPU_MODE_FP64) +static const FpuMode kFpuMode = kFP64; +#elif defined(FPU_MODE_FPXX) +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS32R6) +static const FpuMode kFpuMode = kFPXX; +#else +#error "FPXX is supported only on Mips32R2 and Mips32R6" +#endif +#else +static const FpuMode kFpuMode = kFP32; +#endif + +#if defined(__mips_hard_float) && __mips_hard_float != 0 +// Use floating-point coprocessor instructions. This flag is raised when +// -mhard-float is passed to the compiler. +const bool IsMipsSoftFloatABI = false; +#elif defined(__mips_soft_float) && __mips_soft_float != 0 +// This flag is raised when -msoft-float is passed to the compiler. +// Although FPU is a base requirement for v8, soft-float ABI is used +// on soft-float systems with FPU kernel emulation. +const bool IsMipsSoftFloatABI = true; +#else +const bool IsMipsSoftFloatABI = true; +#endif + +#if defined(V8_TARGET_LITTLE_ENDIAN) +const uint32_t kHoleNanUpper32Offset = 4; +const uint32_t kHoleNanLower32Offset = 0; +#elif defined(V8_TARGET_BIG_ENDIAN) +const uint32_t kHoleNanUpper32Offset = 0; +const uint32_t kHoleNanLower32Offset = 4; +#else +#error Unknown endianness +#endif + +constexpr bool IsFp64Mode() { return kFpuMode == kFP64; } +constexpr bool IsFp32Mode() { return kFpuMode == kFP32; } +constexpr bool IsFpxxMode() { return kFpuMode == kFPXX; } + +#ifndef _MIPS_ARCH_MIPS32RX +constexpr bool IsMipsArchVariant(const ArchVariants check) { + return kArchVariant == check; +} +#else +bool IsMipsArchVariant(const ArchVariants check) { + return CpuFeatures::IsSupported(static_cast(check)); +} +#endif + +#if defined(V8_TARGET_LITTLE_ENDIAN) +const uint32_t kMipsLwrOffset = 0; +const uint32_t kMipsLwlOffset = 3; +const uint32_t kMipsSwrOffset = 0; +const uint32_t kMipsSwlOffset = 3; +#elif defined(V8_TARGET_BIG_ENDIAN) +const uint32_t kMipsLwrOffset = 3; +const uint32_t kMipsLwlOffset = 0; +const uint32_t kMipsSwrOffset = 3; +const uint32_t kMipsSwlOffset = 0; +#else +#error Unknown endianness +#endif + +#if defined(V8_TARGET_LITTLE_ENDIAN) +const uint32_t kLeastSignificantByteInInt32Offset = 0; +#elif defined(V8_TARGET_BIG_ENDIAN) +const uint32_t kLeastSignificantByteInInt32Offset = 3; +#else +#error Unknown endianness +#endif + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif +#include + +// Defines constants and accessor classes to assemble, disassemble and +// simulate MIPS32 instructions. +// +// See: MIPS32 Architecture For Programmers +// Volume II: The MIPS32 Instruction Set +// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf. + +namespace v8 { +namespace internal { + +constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096; + +// ----------------------------------------------------------------------------- +// Registers and FPURegisters. + +// Number of general purpose registers. +const int kNumRegisters = 32; +const int kInvalidRegister = -1; + +// Number of registers with HI, LO, and pc. 
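// Usage sketch (not part of the patch): the variant and FPU-mode predicates
// above are intended as feature guards in the code generator. The function
// names here are hypothetical.
inline bool CanUseCompactBranches() {
  // Compact branches (BC, BALC, BEQC, ...) exist only on MIPS32r6.
  return IsMipsArchVariant(kMips32r6);
}
inline bool DoublesUseRegisterPairs() {
  // In FP32 mode, double-precision values live in even/odd pairs of 32-bit
  // FPU registers.
  return IsFp32Mode();
}
// (kNumSimuRegisters, defined next, counts the 32 GPRs plus HI, LO and pc, as
// the comment above says.)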
+const int kNumSimuRegisters = 35; + +// In the simulator, the PC register is simulated as the 34th register. +const int kPCRegister = 34; + +// Number coprocessor registers. +const int kNumFPURegisters = 32; +const int kInvalidFPURegister = -1; + +// Number of MSA registers +const int kNumMSARegisters = 32; +const int kInvalidMSARegister = -1; + +const int kInvalidMSAControlRegister = -1; +const int kMSAIRRegister = 0; +const int kMSACSRRegister = 1; +const int kMSARegSize = 128; +const int kMSALanesByte = kMSARegSize / 8; +const int kMSALanesHalf = kMSARegSize / 16; +const int kMSALanesWord = kMSARegSize / 32; +const int kMSALanesDword = kMSARegSize / 64; + +// FPU (coprocessor 1) control registers. Currently only FCSR is implemented. +const int kFCSRRegister = 31; +const int kInvalidFPUControlRegister = -1; +const uint32_t kFPUInvalidResult = static_cast(1u << 31) - 1; +const int32_t kFPUInvalidResultNegative = static_cast(1u << 31); +const uint64_t kFPU64InvalidResult = + static_cast(static_cast(1) << 63) - 1; +const int64_t kFPU64InvalidResultNegative = + static_cast(static_cast(1) << 63); + +// FCSR constants. +const uint32_t kFCSRInexactFlagBit = 2; +const uint32_t kFCSRUnderflowFlagBit = 3; +const uint32_t kFCSROverflowFlagBit = 4; +const uint32_t kFCSRDivideByZeroFlagBit = 5; +const uint32_t kFCSRInvalidOpFlagBit = 6; +const uint32_t kFCSRNaN2008FlagBit = 18; + +const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit; +const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit; +const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit; +const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit; +const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit; +const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit; + +const uint32_t kFCSRFlagMask = + kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask | + kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask; + +const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask; + +const uint32_t kFCSRInexactCauseBit = 12; +const uint32_t kFCSRUnderflowCauseBit = 13; +const uint32_t kFCSROverflowCauseBit = 14; +const uint32_t kFCSRDivideByZeroCauseBit = 15; +const uint32_t kFCSRInvalidOpCauseBit = 16; +const uint32_t kFCSRUnimplementedOpCauseBit = 17; + +const uint32_t kFCSRInexactCauseMask = 1 << kFCSRInexactCauseBit; +const uint32_t kFCSRUnderflowCauseMask = 1 << kFCSRUnderflowCauseBit; +const uint32_t kFCSROverflowCauseMask = 1 << kFCSROverflowCauseBit; +const uint32_t kFCSRDivideByZeroCauseMask = 1 << kFCSRDivideByZeroCauseBit; +const uint32_t kFCSRInvalidOpCauseMask = 1 << kFCSRInvalidOpCauseBit; +const uint32_t kFCSRUnimplementedOpCauseMask = 1 + << kFCSRUnimplementedOpCauseBit; + +const uint32_t kFCSRCauseMask = + kFCSRInexactCauseMask | kFCSRUnderflowCauseMask | kFCSROverflowCauseMask | + kFCSRDivideByZeroCauseMask | kFCSRInvalidOpCauseMask | + kFCSRUnimplementedOpCauseBit; + +// 'pref' instruction hints +const int32_t kPrefHintLoad = 0; +const int32_t kPrefHintStore = 1; +const int32_t kPrefHintLoadStreamed = 4; +const int32_t kPrefHintStoreStreamed = 5; +const int32_t kPrefHintLoadRetained = 6; +const int32_t kPrefHintStoreRetained = 7; +const int32_t kPrefHintWritebackInvalidate = 25; +const int32_t kPrefHintPrepareForStore = 30; + +// Actual value of root register is offset from the root array's start +// to take advantage of negative displacement values. +// TODO(sigurds): Choose best value. 
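// Usage sketch (not part of the patch): the FCSR masks above combine as plain
// bit tests; for instance kFCSRFlagMask covers bits 2..6, i.e. 0x7C. The
// function name below is hypothetical.
inline bool FPUOperationRaisedException(uint32_t fcsr) {
  // Everything except the inexact flag counts as an exception, mirroring
  // kFCSRExceptionFlagMask above.
  return (fcsr & kFCSRExceptionFlagMask) != 0;
}
// (kRootRegisterBias, defined next, is the offset discussed in the comment
// above.)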
+constexpr int kRootRegisterBias = 256; + +// Helper functions for converting between register numbers and names. +class Registers { + public: + // Return the name of the register. + static const char* Name(int reg); + + // Lookup the register number for the name provided. + static int Number(const char* name); + + struct RegisterAlias { + int reg; + const char* name; + }; + + static const int32_t kMaxValue = 0x7fffffff; + static const int32_t kMinValue = 0x80000000; + + private: + static const char* names_[kNumSimuRegisters]; + static const RegisterAlias aliases_[]; +}; + +// Helper functions for converting between register numbers and names. +class FPURegisters { + public: + // Return the name of the register. + static const char* Name(int reg); + + // Lookup the register number for the name provided. + static int Number(const char* name); + + struct RegisterAlias { + int creg; + const char* name; + }; + + private: + static const char* names_[kNumFPURegisters]; + static const RegisterAlias aliases_[]; +}; + +// Helper functions for converting between register numbers and names. +class MSARegisters { + public: + // Return the name of the register. + static const char* Name(int reg); + + // Lookup the register number for the name provided. + static int Number(const char* name); + + struct RegisterAlias { + int creg; + const char* name; + }; + + private: + static const char* names_[kNumMSARegisters]; + static const RegisterAlias aliases_[]; +}; + +// ----------------------------------------------------------------------------- +// Instructions encoding constants. + +// On MIPS all instructions are 32 bits. +using Instr = int32_t; + +// Special Software Interrupt codes when used in the presence of the MIPS +// simulator. +enum SoftwareInterruptCodes { + // Transition to C code. + call_rt_redirected = 0xfffff +}; + +// On MIPS Simulator breakpoints can have different codes: +// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints, +// the simulator will run through them and print the registers. +// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop() +// instructions (see Assembler::stop()). +// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the +// debugger. +const uint32_t kMaxWatchpointCode = 31; +const uint32_t kMaxStopCode = 127; +STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode); + +// ----- Fields offset and length. 
+const int kOpcodeShift = 26; +const int kOpcodeBits = 6; +const int kRsShift = 21; +const int kRsBits = 5; +const int kRtShift = 16; +const int kRtBits = 5; +const int kRdShift = 11; +const int kRdBits = 5; +const int kSaShift = 6; +const int kSaBits = 5; +const int kLsaSaBits = 2; +const int kFunctionShift = 0; +const int kFunctionBits = 6; +const int kLuiShift = 16; +const int kBp2Shift = 6; +const int kBp2Bits = 2; +const int kBaseShift = 21; +const int kBaseBits = 5; +const int kBit6Shift = 6; +const int kBit6Bits = 1; + +const int kImm9Shift = 7; +const int kImm9Bits = 9; +const int kImm16Shift = 0; +const int kImm16Bits = 16; +const int kImm18Shift = 0; +const int kImm18Bits = 18; +const int kImm19Shift = 0; +const int kImm19Bits = 19; +const int kImm21Shift = 0; +const int kImm21Bits = 21; +const int kImm26Shift = 0; +const int kImm26Bits = 26; +const int kImm28Shift = 0; +const int kImm28Bits = 28; +const int kImm32Shift = 0; +const int kImm32Bits = 32; +const int kMsaImm8Shift = 16; +const int kMsaImm8Bits = 8; +const int kMsaImm5Shift = 16; +const int kMsaImm5Bits = 5; +const int kMsaImm10Shift = 11; +const int kMsaImm10Bits = 10; +const int kMsaImmMI10Shift = 16; +const int kMsaImmMI10Bits = 10; + +// In branches and jumps immediate fields point to words, not bytes, +// and are therefore shifted by 2. +const int kImmFieldShift = 2; + +const int kFrBits = 5; +const int kFrShift = 21; +const int kFsShift = 11; +const int kFsBits = 5; +const int kFtShift = 16; +const int kFtBits = 5; +const int kFdShift = 6; +const int kFdBits = 5; +const int kFCccShift = 8; +const int kFCccBits = 3; +const int kFBccShift = 18; +const int kFBccBits = 3; +const int kFBtrueShift = 16; +const int kFBtrueBits = 1; +const int kWtBits = 5; +const int kWtShift = 16; +const int kWsBits = 5; +const int kWsShift = 11; +const int kWdBits = 5; +const int kWdShift = 6; + +// ----- Miscellaneous useful masks. +// Instruction bit masks. +const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift; +const int kImm9Mask = ((1 << kImm9Bits) - 1) << kImm9Shift; +const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift; +const int kImm18Mask = ((1 << kImm18Bits) - 1) << kImm18Shift; +const int kImm19Mask = ((1 << kImm19Bits) - 1) << kImm19Shift; +const int kImm21Mask = ((1 << kImm21Bits) - 1) << kImm21Shift; +const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift; +const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift; +const int kImm5Mask = ((1 << 5) - 1); +const int kImm8Mask = ((1 << 8) - 1); +const int kImm10Mask = ((1 << 10) - 1); +const int kMsaI5I10Mask = ((7U << 23) | ((1 << 6) - 1)); +const int kMsaI8Mask = ((3U << 24) | ((1 << 6) - 1)); +const int kMsaI5Mask = ((7U << 23) | ((1 << 6) - 1)); +const int kMsaMI10Mask = (15U << 2); +const int kMsaBITMask = ((7U << 23) | ((1 << 6) - 1)); +const int kMsaELMMask = (15U << 22); +const int kMsaLongerELMMask = kMsaELMMask | (63U << 16); +const int kMsa3RMask = ((7U << 23) | ((1 << 6) - 1)); +const int kMsa3RFMask = ((15U << 22) | ((1 << 6) - 1)); +const int kMsaVECMask = (23U << 21); +const int kMsa2RMask = (7U << 18); +const int kMsa2RFMask = (15U << 17); +const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift; +const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift; +const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift; +const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift; +const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift; +// Misc masks. 
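// Illustrative sketch (not part of the patch): the shift/bit pairs and the
// derived masks above are used together to pull fields out of a raw
// instruction word. The helper names are hypothetical.
inline uint32_t ExtractRs(uint32_t instr) {
  return (instr >> kRsShift) & ((1 << kRsBits) - 1);  // 5-bit register number
}
inline int32_t ExtractImm16(uint32_t instr) {
  // Sign-extend the 16-bit immediate; branch offsets are additionally shifted
  // left by kImmFieldShift (2) to convert instruction words to bytes.
  return static_cast<int16_t>(instr & kImm16Mask);
}
// The miscellaneous masks below isolate the HI/LO halfwords, the sign bit and
// the in-segment jump address bits.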
+const int kHiMask = 0xffff << 16; +const int kLoMask = 0xffff; +const int kSignMask = 0x80000000; +const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1; + +// ----- MIPS Opcodes and Function Fields. +// We use this presentation to stay close to the table representation in +// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set. +enum Opcode : uint32_t { + SPECIAL = 0U << kOpcodeShift, + REGIMM = 1U << kOpcodeShift, + + J = ((0U << 3) + 2) << kOpcodeShift, + JAL = ((0U << 3) + 3) << kOpcodeShift, + BEQ = ((0U << 3) + 4) << kOpcodeShift, + BNE = ((0U << 3) + 5) << kOpcodeShift, + BLEZ = ((0U << 3) + 6) << kOpcodeShift, + BGTZ = ((0U << 3) + 7) << kOpcodeShift, + + ADDI = ((1U << 3) + 0) << kOpcodeShift, + ADDIU = ((1U << 3) + 1) << kOpcodeShift, + SLTI = ((1U << 3) + 2) << kOpcodeShift, + SLTIU = ((1U << 3) + 3) << kOpcodeShift, + ANDI = ((1U << 3) + 4) << kOpcodeShift, + ORI = ((1U << 3) + 5) << kOpcodeShift, + XORI = ((1U << 3) + 6) << kOpcodeShift, + LUI = ((1U << 3) + 7) << kOpcodeShift, // LUI/AUI family. + + BEQC = ((2U << 3) + 0) << kOpcodeShift, + COP1 = ((2U << 3) + 1) << kOpcodeShift, // Coprocessor 1 class. + BEQL = ((2U << 3) + 4) << kOpcodeShift, + BNEL = ((2U << 3) + 5) << kOpcodeShift, + BLEZL = ((2U << 3) + 6) << kOpcodeShift, + BGTZL = ((2U << 3) + 7) << kOpcodeShift, + + DADDI = ((3U << 3) + 0) << kOpcodeShift, // This is also BNEC. + SPECIAL2 = ((3U << 3) + 4) << kOpcodeShift, + MSA = ((3U << 3) + 6) << kOpcodeShift, + SPECIAL3 = ((3U << 3) + 7) << kOpcodeShift, + + LB = ((4U << 3) + 0) << kOpcodeShift, + LH = ((4U << 3) + 1) << kOpcodeShift, + LWL = ((4U << 3) + 2) << kOpcodeShift, + LW = ((4U << 3) + 3) << kOpcodeShift, + LBU = ((4U << 3) + 4) << kOpcodeShift, + LHU = ((4U << 3) + 5) << kOpcodeShift, + LWR = ((4U << 3) + 6) << kOpcodeShift, + SB = ((5U << 3) + 0) << kOpcodeShift, + SH = ((5U << 3) + 1) << kOpcodeShift, + SWL = ((5U << 3) + 2) << kOpcodeShift, + SW = ((5U << 3) + 3) << kOpcodeShift, + SWR = ((5U << 3) + 6) << kOpcodeShift, + + LL = ((6U << 3) + 0) << kOpcodeShift, + LWC1 = ((6U << 3) + 1) << kOpcodeShift, + BC = ((6U << 3) + 2) << kOpcodeShift, + LDC1 = ((6U << 3) + 5) << kOpcodeShift, + POP66 = ((6U << 3) + 6) << kOpcodeShift, // beqzc, jic + + PREF = ((6U << 3) + 3) << kOpcodeShift, + + SC = ((7U << 3) + 0) << kOpcodeShift, + SWC1 = ((7U << 3) + 1) << kOpcodeShift, + BALC = ((7U << 3) + 2) << kOpcodeShift, + PCREL = ((7U << 3) + 3) << kOpcodeShift, + SDC1 = ((7U << 3) + 5) << kOpcodeShift, + POP76 = ((7U << 3) + 6) << kOpcodeShift, // bnezc, jialc + + COP1X = ((1U << 4) + 3) << kOpcodeShift, + + // New r6 instruction. + POP06 = BLEZ, // bgeuc/bleuc, blezalc, bgezalc + POP07 = BGTZ, // bltuc/bgtuc, bgtzalc, bltzalc + POP10 = ADDI, // beqzalc, bovc, beqc + POP26 = BLEZL, // bgezc, blezc, bgec/blec + POP27 = BGTZL, // bgtzc, bltzc, bltc/bgtc + POP30 = DADDI, // bnezalc, bnvc, bnec +}; + +enum SecondaryField : uint32_t { + // SPECIAL Encoding of Function Field. 
+ SLL = ((0U << 3) + 0), + MOVCI = ((0U << 3) + 1), + SRL = ((0U << 3) + 2), + SRA = ((0U << 3) + 3), + SLLV = ((0U << 3) + 4), + LSA = ((0U << 3) + 5), + SRLV = ((0U << 3) + 6), + SRAV = ((0U << 3) + 7), + + JR = ((1U << 3) + 0), + JALR = ((1U << 3) + 1), + MOVZ = ((1U << 3) + 2), + MOVN = ((1U << 3) + 3), + BREAK = ((1U << 3) + 5), + SYNC = ((1U << 3) + 7), + + MFHI = ((2U << 3) + 0), + CLZ_R6 = ((2U << 3) + 0), + CLO_R6 = ((2U << 3) + 1), + MFLO = ((2U << 3) + 2), + + MULT = ((3U << 3) + 0), + MULTU = ((3U << 3) + 1), + DIV = ((3U << 3) + 2), + DIVU = ((3U << 3) + 3), + + ADD = ((4U << 3) + 0), + ADDU = ((4U << 3) + 1), + SUB = ((4U << 3) + 2), + SUBU = ((4U << 3) + 3), + AND = ((4U << 3) + 4), + OR = ((4U << 3) + 5), + XOR = ((4U << 3) + 6), + NOR = ((4U << 3) + 7), + + SLT = ((5U << 3) + 2), + SLTU = ((5U << 3) + 3), + + TGE = ((6U << 3) + 0), + TGEU = ((6U << 3) + 1), + TLT = ((6U << 3) + 2), + TLTU = ((6U << 3) + 3), + TEQ = ((6U << 3) + 4), + SELEQZ_S = ((6U << 3) + 5), + TNE = ((6U << 3) + 6), + SELNEZ_S = ((6U << 3) + 7), + + // Multiply integers in r6. + MUL_MUH = ((3U << 3) + 0), // MUL, MUH. + MUL_MUH_U = ((3U << 3) + 1), // MUL_U, MUH_U. + RINT = ((3U << 3) + 2), + + MUL_OP = ((0U << 3) + 2), + MUH_OP = ((0U << 3) + 3), + DIV_OP = ((0U << 3) + 2), + MOD_OP = ((0U << 3) + 3), + + DIV_MOD = ((3U << 3) + 2), + DIV_MOD_U = ((3U << 3) + 3), + + // SPECIAL2 Encoding of Function Field. + MUL = ((0U << 3) + 2), + CLZ = ((4U << 3) + 0), + CLO = ((4U << 3) + 1), + + // SPECIAL3 Encoding of Function Field. + EXT = ((0U << 3) + 0), + INS = ((0U << 3) + 4), + BSHFL = ((4U << 3) + 0), + SC_R6 = ((4U << 3) + 6), + LL_R6 = ((6U << 3) + 6), + + // SPECIAL3 Encoding of sa Field. + BITSWAP = ((0U << 3) + 0), + ALIGN = ((0U << 3) + 2), + WSBH = ((0U << 3) + 2), + SEB = ((2U << 3) + 0), + SEH = ((3U << 3) + 0), + + // REGIMM encoding of rt Field. + BLTZ = ((0U << 3) + 0) << 16, + BGEZ = ((0U << 3) + 1) << 16, + BLTZAL = ((2U << 3) + 0) << 16, + BGEZAL = ((2U << 3) + 1) << 16, + BGEZALL = ((2U << 3) + 3) << 16, + + // COP1 Encoding of rs Field. + MFC1 = ((0U << 3) + 0) << 21, + CFC1 = ((0U << 3) + 2) << 21, + MFHC1 = ((0U << 3) + 3) << 21, + MTC1 = ((0U << 3) + 4) << 21, + CTC1 = ((0U << 3) + 6) << 21, + MTHC1 = ((0U << 3) + 7) << 21, + BC1 = ((1U << 3) + 0) << 21, + S = ((2U << 3) + 0) << 21, + D = ((2U << 3) + 1) << 21, + W = ((2U << 3) + 4) << 21, + L = ((2U << 3) + 5) << 21, + PS = ((2U << 3) + 6) << 21, + // COP1 Encoding of Function Field When rs=S. + + ADD_S = ((0U << 3) + 0), + SUB_S = ((0U << 3) + 1), + MUL_S = ((0U << 3) + 2), + DIV_S = ((0U << 3) + 3), + ABS_S = ((0U << 3) + 5), + SQRT_S = ((0U << 3) + 4), + MOV_S = ((0U << 3) + 6), + NEG_S = ((0U << 3) + 7), + ROUND_L_S = ((1U << 3) + 0), + TRUNC_L_S = ((1U << 3) + 1), + CEIL_L_S = ((1U << 3) + 2), + FLOOR_L_S = ((1U << 3) + 3), + ROUND_W_S = ((1U << 3) + 4), + TRUNC_W_S = ((1U << 3) + 5), + CEIL_W_S = ((1U << 3) + 6), + FLOOR_W_S = ((1U << 3) + 7), + RECIP_S = ((2U << 3) + 5), + RSQRT_S = ((2U << 3) + 6), + MADDF_S = ((3U << 3) + 0), + MSUBF_S = ((3U << 3) + 1), + CLASS_S = ((3U << 3) + 3), + CVT_D_S = ((4U << 3) + 1), + CVT_W_S = ((4U << 3) + 4), + CVT_L_S = ((4U << 3) + 5), + CVT_PS_S = ((4U << 3) + 6), + + // COP1 Encoding of Function Field When rs=D. 
+ ADD_D = ((0U << 3) + 0), + SUB_D = ((0U << 3) + 1), + MUL_D = ((0U << 3) + 2), + DIV_D = ((0U << 3) + 3), + SQRT_D = ((0U << 3) + 4), + ABS_D = ((0U << 3) + 5), + MOV_D = ((0U << 3) + 6), + NEG_D = ((0U << 3) + 7), + ROUND_L_D = ((1U << 3) + 0), + TRUNC_L_D = ((1U << 3) + 1), + CEIL_L_D = ((1U << 3) + 2), + FLOOR_L_D = ((1U << 3) + 3), + ROUND_W_D = ((1U << 3) + 4), + TRUNC_W_D = ((1U << 3) + 5), + CEIL_W_D = ((1U << 3) + 6), + FLOOR_W_D = ((1U << 3) + 7), + RECIP_D = ((2U << 3) + 5), + RSQRT_D = ((2U << 3) + 6), + MADDF_D = ((3U << 3) + 0), + MSUBF_D = ((3U << 3) + 1), + CLASS_D = ((3U << 3) + 3), + MIN = ((3U << 3) + 4), + MINA = ((3U << 3) + 5), + MAX = ((3U << 3) + 6), + MAXA = ((3U << 3) + 7), + CVT_S_D = ((4U << 3) + 0), + CVT_W_D = ((4U << 3) + 4), + CVT_L_D = ((4U << 3) + 5), + C_F_D = ((6U << 3) + 0), + C_UN_D = ((6U << 3) + 1), + C_EQ_D = ((6U << 3) + 2), + C_UEQ_D = ((6U << 3) + 3), + C_OLT_D = ((6U << 3) + 4), + C_ULT_D = ((6U << 3) + 5), + C_OLE_D = ((6U << 3) + 6), + C_ULE_D = ((6U << 3) + 7), + + // COP1 Encoding of Function Field When rs=W or L. + CVT_S_W = ((4U << 3) + 0), + CVT_D_W = ((4U << 3) + 1), + CVT_S_L = ((4U << 3) + 0), + CVT_D_L = ((4U << 3) + 1), + BC1EQZ = ((2U << 2) + 1) << 21, + BC1NEZ = ((3U << 2) + 1) << 21, + // COP1 CMP positive predicates Bit 5..4 = 00. + CMP_AF = ((0U << 3) + 0), + CMP_UN = ((0U << 3) + 1), + CMP_EQ = ((0U << 3) + 2), + CMP_UEQ = ((0U << 3) + 3), + CMP_LT = ((0U << 3) + 4), + CMP_ULT = ((0U << 3) + 5), + CMP_LE = ((0U << 3) + 6), + CMP_ULE = ((0U << 3) + 7), + CMP_SAF = ((1U << 3) + 0), + CMP_SUN = ((1U << 3) + 1), + CMP_SEQ = ((1U << 3) + 2), + CMP_SUEQ = ((1U << 3) + 3), + CMP_SSLT = ((1U << 3) + 4), + CMP_SSULT = ((1U << 3) + 5), + CMP_SLE = ((1U << 3) + 6), + CMP_SULE = ((1U << 3) + 7), + // COP1 CMP negative predicates Bit 5..4 = 01. + CMP_AT = ((2U << 3) + 0), // Reserved, not implemented. + CMP_OR = ((2U << 3) + 1), + CMP_UNE = ((2U << 3) + 2), + CMP_NE = ((2U << 3) + 3), + CMP_UGE = ((2U << 3) + 4), // Reserved, not implemented. + CMP_OGE = ((2U << 3) + 5), // Reserved, not implemented. + CMP_UGT = ((2U << 3) + 6), // Reserved, not implemented. + CMP_OGT = ((2U << 3) + 7), // Reserved, not implemented. + CMP_SAT = ((3U << 3) + 0), // Reserved, not implemented. + CMP_SOR = ((3U << 3) + 1), + CMP_SUNE = ((3U << 3) + 2), + CMP_SNE = ((3U << 3) + 3), + CMP_SUGE = ((3U << 3) + 4), // Reserved, not implemented. + CMP_SOGE = ((3U << 3) + 5), // Reserved, not implemented. + CMP_SUGT = ((3U << 3) + 6), // Reserved, not implemented. + CMP_SOGT = ((3U << 3) + 7), // Reserved, not implemented. + + SEL = ((2U << 3) + 0), + MOVZ_C = ((2U << 3) + 2), + MOVN_C = ((2U << 3) + 3), + SELEQZ_C = ((2U << 3) + 4), // COP1 on FPR registers. + MOVF = ((2U << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt + SELNEZ_C = ((2U << 3) + 7), // COP1 on FPR registers. + // COP1 Encoding of Function Field When rs=PS. + + // COP1X Encoding of Function Field. + MADD_S = ((4U << 3) + 0), + MADD_D = ((4U << 3) + 1), + MSUB_S = ((5U << 3) + 0), + MSUB_D = ((5U << 3) + 1), + + // PCREL Encoding of rt Field. + ADDIUPC = ((0U << 2) + 0), + LWPC = ((0U << 2) + 1), + AUIPC = ((3U << 3) + 6), + ALUIPC = ((3U << 3) + 7), + + // POP66 Encoding of rs Field. + JIC = ((0U << 5) + 0), + + // POP76 Encoding of rs Field. 
+ JIALC = ((0U << 5) + 0), + + // COP1 Encoding of rs Field for MSA Branch Instructions + BZ_V = (((1U << 3) + 3) << kRsShift), + BNZ_V = (((1U << 3) + 7) << kRsShift), + BZ_B = (((3U << 3) + 0) << kRsShift), + BZ_H = (((3U << 3) + 1) << kRsShift), + BZ_W = (((3U << 3) + 2) << kRsShift), + BZ_D = (((3U << 3) + 3) << kRsShift), + BNZ_B = (((3U << 3) + 4) << kRsShift), + BNZ_H = (((3U << 3) + 5) << kRsShift), + BNZ_W = (((3U << 3) + 6) << kRsShift), + BNZ_D = (((3U << 3) + 7) << kRsShift), + + // MSA: Operation Field for MI10 Instruction Formats + MSA_LD = (8U << 2), + MSA_ST = (9U << 2), + LD_B = ((8U << 2) + 0), + LD_H = ((8U << 2) + 1), + LD_W = ((8U << 2) + 2), + LD_D = ((8U << 2) + 3), + ST_B = ((9U << 2) + 0), + ST_H = ((9U << 2) + 1), + ST_W = ((9U << 2) + 2), + ST_D = ((9U << 2) + 3), + + // MSA: Operation Field for I5 Instruction Format + ADDVI = ((0U << 23) + 6), + SUBVI = ((1U << 23) + 6), + MAXI_S = ((2U << 23) + 6), + MAXI_U = ((3U << 23) + 6), + MINI_S = ((4U << 23) + 6), + MINI_U = ((5U << 23) + 6), + CEQI = ((0U << 23) + 7), + CLTI_S = ((2U << 23) + 7), + CLTI_U = ((3U << 23) + 7), + CLEI_S = ((4U << 23) + 7), + CLEI_U = ((5U << 23) + 7), + LDI = ((6U << 23) + 7), // I10 instruction format + I5_DF_b = (0U << 21), + I5_DF_h = (1U << 21), + I5_DF_w = (2U << 21), + I5_DF_d = (3U << 21), + + // MSA: Operation Field for I8 Instruction Format + ANDI_B = ((0U << 24) + 0), + ORI_B = ((1U << 24) + 0), + NORI_B = ((2U << 24) + 0), + XORI_B = ((3U << 24) + 0), + BMNZI_B = ((0U << 24) + 1), + BMZI_B = ((1U << 24) + 1), + BSELI_B = ((2U << 24) + 1), + SHF_B = ((0U << 24) + 2), + SHF_H = ((1U << 24) + 2), + SHF_W = ((2U << 24) + 2), + + MSA_VEC_2R_2RF_MINOR = ((3U << 3) + 6), + + // MSA: Operation Field for VEC Instruction Formats + AND_V = (((0U << 2) + 0) << 21), + OR_V = (((0U << 2) + 1) << 21), + NOR_V = (((0U << 2) + 2) << 21), + XOR_V = (((0U << 2) + 3) << 21), + BMNZ_V = (((1U << 2) + 0) << 21), + BMZ_V = (((1U << 2) + 1) << 21), + BSEL_V = (((1U << 2) + 2) << 21), + + // MSA: Operation Field for 2R Instruction Formats + MSA_2R_FORMAT = (((6U << 2) + 0) << 21), + FILL = (0U << 18), + PCNT = (1U << 18), + NLOC = (2U << 18), + NLZC = (3U << 18), + MSA_2R_DF_b = (0U << 16), + MSA_2R_DF_h = (1U << 16), + MSA_2R_DF_w = (2U << 16), + MSA_2R_DF_d = (3U << 16), + + // MSA: Operation Field for 2RF Instruction Formats + MSA_2RF_FORMAT = (((6U << 2) + 1) << 21), + FCLASS = (0U << 17), + FTRUNC_S = (1U << 17), + FTRUNC_U = (2U << 17), + FSQRT = (3U << 17), + FRSQRT = (4U << 17), + FRCP = (5U << 17), + FRINT = (6U << 17), + FLOG2 = (7U << 17), + FEXUPL = (8U << 17), + FEXUPR = (9U << 17), + FFQL = (10U << 17), + FFQR = (11U << 17), + FTINT_S = (12U << 17), + FTINT_U = (13U << 17), + FFINT_S = (14U << 17), + FFINT_U = (15U << 17), + MSA_2RF_DF_w = (0U << 16), + MSA_2RF_DF_d = (1U << 16), + + // MSA: Operation Field for 3R Instruction Format + SLL_MSA = ((0U << 23) + 13), + SRA_MSA = ((1U << 23) + 13), + SRL_MSA = ((2U << 23) + 13), + BCLR = ((3U << 23) + 13), + BSET = ((4U << 23) + 13), + BNEG = ((5U << 23) + 13), + BINSL = ((6U << 23) + 13), + BINSR = ((7U << 23) + 13), + ADDV = ((0U << 23) + 14), + SUBV = ((1U << 23) + 14), + MAX_S = ((2U << 23) + 14), + MAX_U = ((3U << 23) + 14), + MIN_S = ((4U << 23) + 14), + MIN_U = ((5U << 23) + 14), + MAX_A = ((6U << 23) + 14), + MIN_A = ((7U << 23) + 14), + CEQ = ((0U << 23) + 15), + CLT_S = ((2U << 23) + 15), + CLT_U = ((3U << 23) + 15), + CLE_S = ((4U << 23) + 15), + CLE_U = ((5U << 23) + 15), + ADD_A = ((0U << 23) + 16), + ADDS_A = ((1U << 23) + 16), + 
ADDS_S = ((2U << 23) + 16), + ADDS_U = ((3U << 23) + 16), + AVE_S = ((4U << 23) + 16), + AVE_U = ((5U << 23) + 16), + AVER_S = ((6U << 23) + 16), + AVER_U = ((7U << 23) + 16), + SUBS_S = ((0U << 23) + 17), + SUBS_U = ((1U << 23) + 17), + SUBSUS_U = ((2U << 23) + 17), + SUBSUU_S = ((3U << 23) + 17), + ASUB_S = ((4U << 23) + 17), + ASUB_U = ((5U << 23) + 17), + MULV = ((0U << 23) + 18), + MADDV = ((1U << 23) + 18), + MSUBV = ((2U << 23) + 18), + DIV_S_MSA = ((4U << 23) + 18), + DIV_U = ((5U << 23) + 18), + MOD_S = ((6U << 23) + 18), + MOD_U = ((7U << 23) + 18), + DOTP_S = ((0U << 23) + 19), + DOTP_U = ((1U << 23) + 19), + DPADD_S = ((2U << 23) + 19), + DPADD_U = ((3U << 23) + 19), + DPSUB_S = ((4U << 23) + 19), + DPSUB_U = ((5U << 23) + 19), + SLD = ((0U << 23) + 20), + SPLAT = ((1U << 23) + 20), + PCKEV = ((2U << 23) + 20), + PCKOD = ((3U << 23) + 20), + ILVL = ((4U << 23) + 20), + ILVR = ((5U << 23) + 20), + ILVEV = ((6U << 23) + 20), + ILVOD = ((7U << 23) + 20), + VSHF = ((0U << 23) + 21), + SRAR = ((1U << 23) + 21), + SRLR = ((2U << 23) + 21), + HADD_S = ((4U << 23) + 21), + HADD_U = ((5U << 23) + 21), + HSUB_S = ((6U << 23) + 21), + HSUB_U = ((7U << 23) + 21), + MSA_3R_DF_b = (0U << 21), + MSA_3R_DF_h = (1U << 21), + MSA_3R_DF_w = (2U << 21), + MSA_3R_DF_d = (3U << 21), + + // MSA: Operation Field for 3RF Instruction Format + FCAF = ((0U << 22) + 26), + FCUN = ((1U << 22) + 26), + FCEQ = ((2U << 22) + 26), + FCUEQ = ((3U << 22) + 26), + FCLT = ((4U << 22) + 26), + FCULT = ((5U << 22) + 26), + FCLE = ((6U << 22) + 26), + FCULE = ((7U << 22) + 26), + FSAF = ((8U << 22) + 26), + FSUN = ((9U << 22) + 26), + FSEQ = ((10U << 22) + 26), + FSUEQ = ((11U << 22) + 26), + FSLT = ((12U << 22) + 26), + FSULT = ((13U << 22) + 26), + FSLE = ((14U << 22) + 26), + FSULE = ((15U << 22) + 26), + FADD = ((0U << 22) + 27), + FSUB = ((1U << 22) + 27), + FMUL = ((2U << 22) + 27), + FDIV = ((3U << 22) + 27), + FMADD = ((4U << 22) + 27), + FMSUB = ((5U << 22) + 27), + FEXP2 = ((7U << 22) + 27), + FEXDO = ((8U << 22) + 27), + FTQ = ((10U << 22) + 27), + FMIN = ((12U << 22) + 27), + FMIN_A = ((13U << 22) + 27), + FMAX = ((14U << 22) + 27), + FMAX_A = ((15U << 22) + 27), + FCOR = ((1U << 22) + 28), + FCUNE = ((2U << 22) + 28), + FCNE = ((3U << 22) + 28), + MUL_Q = ((4U << 22) + 28), + MADD_Q = ((5U << 22) + 28), + MSUB_Q = ((6U << 22) + 28), + FSOR = ((9U << 22) + 28), + FSUNE = ((10U << 22) + 28), + FSNE = ((11U << 22) + 28), + MULR_Q = ((12U << 22) + 28), + MADDR_Q = ((13U << 22) + 28), + MSUBR_Q = ((14U << 22) + 28), + + // MSA: Operation Field for ELM Instruction Format + MSA_ELM_MINOR = ((3U << 3) + 1), + SLDI = (0U << 22), + CTCMSA = ((0U << 22) | (62U << 16)), + SPLATI = (1U << 22), + CFCMSA = ((1U << 22) | (62U << 16)), + COPY_S = (2U << 22), + MOVE_V = ((2U << 22) | (62U << 16)), + COPY_U = (3U << 22), + INSERT = (4U << 22), + INSVE = (5U << 22), + ELM_DF_B = ((0U << 4) << 16), + ELM_DF_H = ((4U << 3) << 16), + ELM_DF_W = ((12U << 2) << 16), + ELM_DF_D = ((28U << 1) << 16), + + // MSA: Operation Field for BIT Instruction Format + SLLI = ((0U << 23) + 9), + SRAI = ((1U << 23) + 9), + SRLI = ((2U << 23) + 9), + BCLRI = ((3U << 23) + 9), + BSETI = ((4U << 23) + 9), + BNEGI = ((5U << 23) + 9), + BINSLI = ((6U << 23) + 9), + BINSRI = ((7U << 23) + 9), + SAT_S = ((0U << 23) + 10), + SAT_U = ((1U << 23) + 10), + SRARI = ((2U << 23) + 10), + SRLRI = ((3U << 23) + 10), + BIT_DF_b = ((14U << 3) << 16), + BIT_DF_h = ((6U << 4) << 16), + BIT_DF_w = ((2U << 5) << 16), + BIT_DF_d = ((0U << 6) << 16), + + nullptrSF = 0U 
+}; + +enum MSAMinorOpcode : uint32_t { + kMsaMinorUndefined = 0, + kMsaMinorI8, + kMsaMinorI5, + kMsaMinorI10, + kMsaMinorBIT, + kMsaMinor3R, + kMsaMinor3RF, + kMsaMinorELM, + kMsaMinorVEC, + kMsaMinor2R, + kMsaMinor2RF, + kMsaMinorMI10 +}; + +// ----- Emulated conditions. +// On MIPS we use this enum to abstract from conditional branch instructions. +// The 'U' prefix is used to specify unsigned comparisons. +// Opposite conditions must be paired as odd/even numbers +// because 'NegateCondition' function flips LSB to negate condition. +enum Condition { + // Any value < 0 is considered no_condition. + kNoCondition = -1, + overflow = 0, + no_overflow = 1, + Uless = 2, + Ugreater_equal = 3, + Uless_equal = 4, + Ugreater = 5, + equal = 6, + not_equal = 7, // Unordered or Not Equal. + negative = 8, + positive = 9, + parity_even = 10, + parity_odd = 11, + less = 12, + greater_equal = 13, + less_equal = 14, + greater = 15, + ueq = 16, // Unordered or Equal. + ogl = 17, // Ordered and Not Equal. + cc_always = 18, + + // Aliases. + carry = Uless, + not_carry = Ugreater_equal, + zero = equal, + eq = equal, + not_zero = not_equal, + ne = not_equal, + nz = not_equal, + sign = negative, + not_sign = positive, + mi = negative, + pl = positive, + hi = Ugreater, + ls = Uless_equal, + ge = greater_equal, + lt = less, + gt = greater, + le = less_equal, + hs = Ugreater_equal, + lo = Uless, + al = cc_always, + ult = Uless, + uge = Ugreater_equal, + ule = Uless_equal, + ugt = Ugreater, + cc_default = kNoCondition +}; + +// Returns the equivalent of !cc. +// Negation of the default kNoCondition (-1) results in a non-default +// no_condition value (-2). As long as tests for no_condition check +// for condition < 0, this will work as expected. +inline Condition NegateCondition(Condition cc) { + DCHECK(cc != cc_always); + return static_cast(cc ^ 1); +} + +inline Condition NegateFpuCondition(Condition cc) { + DCHECK(cc != cc_always); + switch (cc) { + case ult: + return ge; + case ugt: + return le; + case uge: + return lt; + case ule: + return gt; + case lt: + return uge; + case gt: + return ule; + case ge: + return ult; + case le: + return ugt; + case eq: + return ne; + case ne: + return eq; + case ueq: + return ogl; + case ogl: + return ueq; + default: + return cc; + } +} + +enum MSABranchCondition { + all_not_zero = 0, // Branch If All Elements Are Not Zero + one_elem_not_zero, // Branch If At Least One Element of Any Format Is Not + // Zero + one_elem_zero, // Branch If At Least One Element Is Zero + all_zero // Branch If All Elements of Any Format Are Zero +}; + +inline MSABranchCondition NegateMSABranchCondition(MSABranchCondition cond) { + switch (cond) { + case all_not_zero: + return one_elem_zero; + case one_elem_not_zero: + return all_zero; + case one_elem_zero: + return all_not_zero; + case all_zero: + return one_elem_not_zero; + default: + return cond; + } +} + +enum MSABranchDF { + MSA_BRANCH_B = 0, + MSA_BRANCH_H, + MSA_BRANCH_W, + MSA_BRANCH_D, + MSA_BRANCH_V +}; + +// ----- Coprocessor conditions. +enum FPUCondition { + kNoFPUCondition = -1, + + F = 0x00, // False. + UN = 0x01, // Unordered. + EQ = 0x02, // Equal. + UEQ = 0x03, // Unordered or Equal. + OLT = 0x04, // Ordered or Less Than, on Mips release < 6. + LT = 0x04, // Ordered or Less Than, on Mips release >= 6. + ULT = 0x05, // Unordered or Less Than. + OLE = 0x06, // Ordered or Less Than or Equal, on Mips release < 6. + LE = 0x06, // Ordered or Less Than or Equal, on Mips release >= 6. + ULE = 0x07, // Unordered or Less Than or Equal. 
+ + // Following constants are available on Mips release >= 6 only. + ORD = 0x11, // Ordered, on Mips release >= 6. + UNE = 0x12, // Not equal, on Mips release >= 6. + NE = 0x13, // Ordered Greater Than or Less Than. on Mips >= 6 only. +}; + +// FPU rounding modes. +enum FPURoundingMode { + RN = 0 << 0, // Round to Nearest. + RZ = 1 << 0, // Round towards zero. + RP = 2 << 0, // Round towards Plus Infinity. + RM = 3 << 0, // Round towards Minus Infinity. + + // Aliases. + kRoundToNearest = RN, + kRoundToZero = RZ, + kRoundToPlusInf = RP, + kRoundToMinusInf = RM, + + mode_round = RN, + mode_ceil = RP, + mode_floor = RM, + mode_trunc = RZ +}; + +const uint32_t kFPURoundingModeMask = 3 << 0; + +enum CheckForInexactConversion { + kCheckForInexactConversion, + kDontCheckForInexactConversion +}; + +enum class MaxMinKind : int { kMin = 0, kMax = 1 }; + +// ----------------------------------------------------------------------------- +// Hints. + +// Branch hints are not used on the MIPS. They are defined so that they can +// appear in shared function signatures, but will be ignored in MIPS +// implementations. +enum Hint { no_hint = 0 }; + +inline Hint NegateHint(Hint hint) { return no_hint; } + +// ----------------------------------------------------------------------------- +// Specific instructions, constants, and masks. +// These constants are declared in assembler-mips.cc, as they use named +// registers and other constants. + +// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r) +// operations as post-increment of sp. +extern const Instr kPopInstruction; +// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp. +extern const Instr kPushInstruction; +// sw(r, MemOperand(sp, 0)) +extern const Instr kPushRegPattern; +// lw(r, MemOperand(sp, 0)) +extern const Instr kPopRegPattern; +extern const Instr kLwRegFpOffsetPattern; +extern const Instr kSwRegFpOffsetPattern; +extern const Instr kLwRegFpNegOffsetPattern; +extern const Instr kSwRegFpNegOffsetPattern; +// A mask for the Rt register for push, pop, lw, sw instructions. +extern const Instr kRtMask; +extern const Instr kLwSwInstrTypeMask; +extern const Instr kLwSwInstrArgumentMask; +extern const Instr kLwSwOffsetMask; + +// Break 0xfffff, reserved for redirected real time call. +const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6; +// A nop instruction. (Encoding of sll 0 0 0). +const Instr nopInstr = 0; + +static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) { + return 1ULL << (static_cast(opcode) >> kOpcodeShift); +} + +constexpr uint8_t kInstrSize = 4; +constexpr uint8_t kInstrSizeLog2 = 2; + +class InstructionBase { + public: + enum { + // On MIPS PC cannot actually be directly accessed. We behave as if PC was + // always the value of the current instruction being executed. + kPCReadOffset = 0 + }; + + // Instruction type. + enum Type { kRegisterType, kImmediateType, kJumpType, kUnsupported = -1 }; + + // Get the raw instruction bits. + inline Instr InstructionBits() const { + return *reinterpret_cast(this); + } + + // Set the raw instruction bits to value. + inline void SetInstructionBits(Instr value) { + *reinterpret_cast(this) = value; + } + + // Read one particular bit out of the instruction bits. + inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; } + + // Read a bit field out of the instruction bits. 
+ inline int Bits(int hi, int lo) const { + return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1); + } + + static constexpr uint64_t kOpcodeImmediateTypeMask = + OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) | + OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) | + OpcodeToBitNumber(BGTZ) | OpcodeToBitNumber(ADDI) | + OpcodeToBitNumber(DADDI) | OpcodeToBitNumber(ADDIU) | + OpcodeToBitNumber(SLTI) | OpcodeToBitNumber(SLTIU) | + OpcodeToBitNumber(ANDI) | OpcodeToBitNumber(ORI) | + OpcodeToBitNumber(XORI) | OpcodeToBitNumber(LUI) | + OpcodeToBitNumber(BEQL) | OpcodeToBitNumber(BNEL) | + OpcodeToBitNumber(BLEZL) | OpcodeToBitNumber(BGTZL) | + OpcodeToBitNumber(POP66) | OpcodeToBitNumber(POP76) | + OpcodeToBitNumber(LB) | OpcodeToBitNumber(LH) | OpcodeToBitNumber(LWL) | + OpcodeToBitNumber(LW) | OpcodeToBitNumber(LBU) | OpcodeToBitNumber(LHU) | + OpcodeToBitNumber(LWR) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) | + OpcodeToBitNumber(SWL) | OpcodeToBitNumber(SW) | OpcodeToBitNumber(SWR) | + OpcodeToBitNumber(LWC1) | OpcodeToBitNumber(LDC1) | + OpcodeToBitNumber(SWC1) | OpcodeToBitNumber(SDC1) | + OpcodeToBitNumber(PCREL) | OpcodeToBitNumber(BC) | + OpcodeToBitNumber(BALC); + +#define FunctionFieldToBitNumber(function) (1ULL << function) + + static const uint64_t kFunctionFieldRegisterTypeMask = + FunctionFieldToBitNumber(JR) | FunctionFieldToBitNumber(JALR) | + FunctionFieldToBitNumber(BREAK) | FunctionFieldToBitNumber(SLL) | + FunctionFieldToBitNumber(SRL) | FunctionFieldToBitNumber(SRA) | + FunctionFieldToBitNumber(SLLV) | FunctionFieldToBitNumber(SRLV) | + FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(LSA) | + FunctionFieldToBitNumber(MFHI) | FunctionFieldToBitNumber(MFLO) | + FunctionFieldToBitNumber(MULT) | FunctionFieldToBitNumber(MULTU) | + FunctionFieldToBitNumber(DIV) | FunctionFieldToBitNumber(DIVU) | + FunctionFieldToBitNumber(ADD) | FunctionFieldToBitNumber(ADDU) | + FunctionFieldToBitNumber(SUB) | FunctionFieldToBitNumber(SUBU) | + FunctionFieldToBitNumber(AND) | FunctionFieldToBitNumber(OR) | + FunctionFieldToBitNumber(XOR) | FunctionFieldToBitNumber(NOR) | + FunctionFieldToBitNumber(SLT) | FunctionFieldToBitNumber(SLTU) | + FunctionFieldToBitNumber(TGE) | FunctionFieldToBitNumber(TGEU) | + FunctionFieldToBitNumber(TLT) | FunctionFieldToBitNumber(TLTU) | + FunctionFieldToBitNumber(TEQ) | FunctionFieldToBitNumber(TNE) | + FunctionFieldToBitNumber(MOVZ) | FunctionFieldToBitNumber(MOVN) | + FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) | + FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC); + + // Accessors for the different named fields used in the MIPS encoding. + inline Opcode OpcodeValue() const { + return static_cast( + Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift)); + } + + inline int FunctionFieldRaw() const { + return InstructionBits() & kFunctionFieldMask; + } + + // Return the fields at their original place in the instruction encoding. + inline Opcode OpcodeFieldRaw() const { + return static_cast(InstructionBits() & kOpcodeMask); + } + + // Safe to call within InstructionType(). + inline int RsFieldRawNoAssert() const { + return InstructionBits() & kRsFieldMask; + } + + inline int SaFieldRaw() const { return InstructionBits() & kSaFieldMask; } + + // Get the encoding type of the instruction. 
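// Illustrative sketch (not part of the patch): the two bit-set masks above
// reduce "is this opcode/function in a given group?" to a single AND, since a
// 6-bit field has only 64 possible values and therefore fits in a uint64_t
// bit set. The member name below is hypothetical; InstructionType(), declared
// next, applies kFunctionFieldRegisterTypeMask in exactly this way for
// SPECIAL-class instructions.
bool IsImmediateTypeOpcode() const {
  return (OpcodeToBitNumber(OpcodeFieldRaw()) & kOpcodeImmediateTypeMask) != 0;
}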
+ inline Type InstructionType() const; + + inline MSAMinorOpcode MSAMinorOpcodeField() const { + int op = this->FunctionFieldRaw(); + switch (op) { + case 0: + case 1: + case 2: + return kMsaMinorI8; + case 6: + return kMsaMinorI5; + case 7: + return (((this->InstructionBits() & kMsaI5I10Mask) == LDI) + ? kMsaMinorI10 + : kMsaMinorI5); + case 9: + case 10: + return kMsaMinorBIT; + case 13: + case 14: + case 15: + case 16: + case 17: + case 18: + case 19: + case 20: + case 21: + return kMsaMinor3R; + case 25: + return kMsaMinorELM; + case 26: + case 27: + case 28: + return kMsaMinor3RF; + case 30: + switch (this->RsFieldRawNoAssert()) { + case MSA_2R_FORMAT: + return kMsaMinor2R; + case MSA_2RF_FORMAT: + return kMsaMinor2RF; + default: + return kMsaMinorVEC; + } + break; + case 32: + case 33: + case 34: + case 35: + case 36: + case 37: + case 38: + case 39: + return kMsaMinorMI10; + default: + return kMsaMinorUndefined; + } + } + + protected: + InstructionBase() {} +}; + +template +class InstructionGetters : public T { + public: + inline int RsValue() const { + DCHECK(this->InstructionType() == InstructionBase::kRegisterType || + this->InstructionType() == InstructionBase::kImmediateType); + return InstructionBase::Bits(kRsShift + kRsBits - 1, kRsShift); + } + + inline int RtValue() const { + DCHECK(this->InstructionType() == InstructionBase::kRegisterType || + this->InstructionType() == InstructionBase::kImmediateType); + return this->Bits(kRtShift + kRtBits - 1, kRtShift); + } + + inline int RdValue() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); + return this->Bits(kRdShift + kRdBits - 1, kRdShift); + } + + inline int BaseValue() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(kBaseShift + kBaseBits - 1, kBaseShift); + } + + inline int SaValue() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); + return this->Bits(kSaShift + kSaBits - 1, kSaShift); + } + + inline int LsaSaValue() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); + return this->Bits(kSaShift + kLsaSaBits - 1, kSaShift); + } + + inline int FunctionValue() const { + DCHECK(this->InstructionType() == InstructionBase::kRegisterType || + this->InstructionType() == InstructionBase::kImmediateType); + return this->Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift); + } + + inline int FdValue() const { + return this->Bits(kFdShift + kFdBits - 1, kFdShift); + } + + inline int FsValue() const { + return this->Bits(kFsShift + kFsBits - 1, kFsShift); + } + + inline int FtValue() const { + return this->Bits(kFtShift + kFtBits - 1, kFtShift); + } + + inline int FrValue() const { + return this->Bits(kFrShift + kFrBits - 1, kFrShift); + } + + inline int WdValue() const { + return this->Bits(kWdShift + kWdBits - 1, kWdShift); + } + + inline int WsValue() const { + return this->Bits(kWsShift + kWsBits - 1, kWsShift); + } + + inline int WtValue() const { + return this->Bits(kWtShift + kWtBits - 1, kWtShift); + } + + inline int Bp2Value() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); + return this->Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift); + } + + // Float Compare condition code instruction bits. + inline int FCccValue() const { + return this->Bits(kFCccShift + kFCccBits - 1, kFCccShift); + } + + // Float Branch condition code instruction bits. 
+ inline int FBccValue() const { + return this->Bits(kFBccShift + kFBccBits - 1, kFBccShift); + } + + // Float Branch true/false instruction bit. + inline int FBtrueValue() const { + return this->Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift); + } + + // Return the fields at their original place in the instruction encoding. + inline Opcode OpcodeFieldRaw() const { + return static_cast(this->InstructionBits() & kOpcodeMask); + } + + inline int RsFieldRaw() const { + DCHECK(this->InstructionType() == InstructionBase::kRegisterType || + this->InstructionType() == InstructionBase::kImmediateType); + return this->InstructionBits() & kRsFieldMask; + } + + inline int RtFieldRaw() const { + DCHECK(this->InstructionType() == InstructionBase::kRegisterType || + this->InstructionType() == InstructionBase::kImmediateType); + return this->InstructionBits() & kRtFieldMask; + } + + inline int RdFieldRaw() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); + return this->InstructionBits() & kRdFieldMask; + } + + inline int SaFieldRaw() const { + return this->InstructionBits() & kSaFieldMask; + } + + inline int FunctionFieldRaw() const { + return this->InstructionBits() & kFunctionFieldMask; + } + + // Get the secondary field according to the opcode. + inline int SecondaryValue() const { + Opcode op = this->OpcodeFieldRaw(); + switch (op) { + case SPECIAL: + case SPECIAL2: + return FunctionValue(); + case COP1: + return RsValue(); + case REGIMM: + return RtValue(); + default: + return nullptrSF; + } + } + + inline int32_t ImmValue(int bits) const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(bits - 1, 0); + } + + inline int32_t Imm9Value() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(kImm9Shift + kImm9Bits - 1, kImm9Shift); + } + + inline int32_t Imm16Value() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift); + } + + inline int32_t Imm18Value() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift); + } + + inline int32_t Imm19Value() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift); + } + + inline int32_t Imm21Value() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift); + } + + inline int32_t Imm26Value() const { + DCHECK((this->InstructionType() == InstructionBase::kJumpType) || + (this->InstructionType() == InstructionBase::kImmediateType)); + return this->Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift); + } + + inline int32_t MsaImm8Value() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(kMsaImm8Shift + kMsaImm8Bits - 1, kMsaImm8Shift); + } + + inline int32_t MsaImm5Value() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(kMsaImm5Shift + kMsaImm5Bits - 1, kMsaImm5Shift); + } + + inline int32_t MsaImm10Value() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(kMsaImm10Shift + kMsaImm10Bits - 1, kMsaImm10Shift); + } + + inline int32_t MsaImmMI10Value() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(kMsaImmMI10Shift + 
kMsaImmMI10Bits - 1, kMsaImmMI10Shift); + } + + inline int32_t MsaBitDf() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + int32_t df_m = this->Bits(22, 16); + if (((df_m >> 6) & 1U) == 0) { + return 3; + } else if (((df_m >> 5) & 3U) == 2) { + return 2; + } else if (((df_m >> 4) & 7U) == 6) { + return 1; + } else if (((df_m >> 3) & 15U) == 14) { + return 0; + } else { + return -1; + } + } + + inline int32_t MsaBitMValue() const { + DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); + return this->Bits(16 + this->MsaBitDf() + 3, 16); + } + + inline int32_t MsaElmDf() const { + DCHECK(this->InstructionType() == InstructionBase::kRegisterType || + this->InstructionType() == InstructionBase::kImmediateType); + int32_t df_n = this->Bits(21, 16); + if (((df_n >> 4) & 3U) == 0) { + return 0; + } else if (((df_n >> 3) & 7U) == 4) { + return 1; + } else if (((df_n >> 2) & 15U) == 12) { + return 2; + } else if (((df_n >> 1) & 31U) == 28) { + return 3; + } else { + return -1; + } + } + + inline int32_t MsaElmNValue() const { + DCHECK(this->InstructionType() == InstructionBase::kRegisterType || + this->InstructionType() == InstructionBase::kImmediateType); + return this->Bits(16 + 4 - this->MsaElmDf(), 16); + } + + static bool IsForbiddenAfterBranchInstr(Instr instr); + + // Say if the instruction should not be used in a branch delay slot or + // immediately after a compact branch. + inline bool IsForbiddenAfterBranch() const { + return IsForbiddenAfterBranchInstr(this->InstructionBits()); + } + + inline bool IsForbiddenInBranchDelay() const { + return IsForbiddenAfterBranch(); + } + + // Say if the instruction 'links'. e.g. jal, bal. + bool IsLinkingInstruction() const; + // Say if the instruction is a break or a trap. + bool IsTrap() const; + + inline bool IsMSABranchInstr() const { + if (this->OpcodeFieldRaw() == COP1) { + switch (this->RsFieldRaw()) { + case BZ_V: + case BZ_B: + case BZ_H: + case BZ_W: + case BZ_D: + case BNZ_V: + case BNZ_B: + case BNZ_H: + case BNZ_W: + case BNZ_D: + return true; + default: + return false; + } + } + return false; + } + + inline bool IsMSAInstr() const { + if (this->IsMSABranchInstr() || (this->OpcodeFieldRaw() == MSA)) + return true; + return false; + } +}; + +class Instruction : public InstructionGetters { + public: + // Instructions are read of out a code stream. The only way to get a + // reference to an instruction is to convert a pointer. There is no way + // to allocate or create instances of class Instruction. + // Use the At(pc) function to create references to Instruction. + static Instruction* At(byte* pc) { + return reinterpret_cast(pc); + } + + private: + // We need to prevent the creation of instances of class Instruction. + DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction); +}; + +// ----------------------------------------------------------------------------- +// MIPS assembly various constants. + +// C/C++ argument slots size. +const int kCArgSlotCount = 4; +const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize; + +// JS argument slots size. +const int kJSArgsSlotsSize = 0 * kInstrSize; + +// Assembly builtins argument slots size. 
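// Usage sketch (not part of the patch): Instruction is only ever a
// reinterpreted view of the code stream, so scanning generated code looks
// roughly like the hypothetical helper below.
inline int CountTrapsInCodeRange(byte* start, byte* end) {
  int traps = 0;
  for (byte* pc = start; pc < end; pc += kInstrSize) {
    Instruction* instr = Instruction::At(pc);
    if (instr->IsTrap()) traps++;  // BREAK, TEQ, TNE, ...
  }
  return traps;
}
// (kBArgsSlotsSize, defined next, is zero: assembly builtins take no argument
// slots.)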
+const int kBArgsSlotsSize = 0 * kInstrSize; + +const int kBranchReturnOffset = 2 * kInstrSize; + +InstructionBase::Type InstructionBase::InstructionType() const { + switch (OpcodeFieldRaw()) { + case SPECIAL: + if (FunctionFieldToBitNumber(FunctionFieldRaw()) & + kFunctionFieldRegisterTypeMask) { + return kRegisterType; + } + return kUnsupported; + case SPECIAL2: + switch (FunctionFieldRaw()) { + case MUL: + case CLZ: + return kRegisterType; + default: + return kUnsupported; + } + break; + case SPECIAL3: + switch (FunctionFieldRaw()) { + case INS: + case EXT: + return kRegisterType; + case BSHFL: { + int sa = SaFieldRaw() >> kSaShift; + switch (sa) { + case BITSWAP: + case WSBH: + case SEB: + case SEH: + return kRegisterType; + } + sa >>= kBp2Bits; + switch (sa) { + case ALIGN: + return kRegisterType; + default: + return kUnsupported; + } + } + case LL_R6: + case SC_R6: { + DCHECK(IsMipsArchVariant(kMips32r6)); + return kImmediateType; + } + default: + return kUnsupported; + } + break; + case COP1: // Coprocessor instructions. + switch (RsFieldRawNoAssert()) { + case BC1: // Branch on coprocessor condition. + case BC1EQZ: + case BC1NEZ: + return kImmediateType; + // MSA Branch instructions + case BZ_V: + case BNZ_V: + case BZ_B: + case BZ_H: + case BZ_W: + case BZ_D: + case BNZ_B: + case BNZ_H: + case BNZ_W: + case BNZ_D: + return kImmediateType; + default: + return kRegisterType; + } + break; + case COP1X: + return kRegisterType; + + // 26 bits immediate type instructions. e.g.: j imm26. + case J: + case JAL: + return kJumpType; + + case MSA: + switch (MSAMinorOpcodeField()) { + case kMsaMinor3R: + case kMsaMinor3RF: + case kMsaMinorVEC: + case kMsaMinor2R: + case kMsaMinor2RF: + return kRegisterType; + case kMsaMinorELM: + switch (InstructionBits() & kMsaLongerELMMask) { + case CFCMSA: + case CTCMSA: + case MOVE_V: + return kRegisterType; + default: + return kImmediateType; + } + default: + return kImmediateType; + } + + default: + return kImmediateType; + } +} + +#undef OpcodeToBitNumber +#undef FunctionFieldToBitNumber + +// ----------------------------------------------------------------------------- +// Instructions. + +template +bool InstructionGetters
<T>
::IsLinkingInstruction() const { + uint32_t op = this->OpcodeFieldRaw(); + switch (op) { + case JAL: + return true; + case POP76: + if (this->RsFieldRawNoAssert() == JIALC) + return true; // JIALC + else + return false; // BNEZC + case REGIMM: + switch (this->RtFieldRaw()) { + case BGEZAL: + case BLTZAL: + return true; + default: + return false; + } + case SPECIAL: + switch (this->FunctionFieldRaw()) { + case JALR: + return true; + default: + return false; + } + default: + return false; + } +} + +template +bool InstructionGetters
<T>
::IsTrap() const { + if (this->OpcodeFieldRaw() != SPECIAL) { + return false; + } else { + switch (this->FunctionFieldRaw()) { + case BREAK: + case TGE: + case TGEU: + case TLT: + case TLTU: + case TEQ: + case TNE: + return true; + default: + return false; + } + } +} + +// static +template +bool InstructionGetters::IsForbiddenAfterBranchInstr(Instr instr) { + Opcode opcode = static_cast(instr & kOpcodeMask); + switch (opcode) { + case J: + case JAL: + case BEQ: + case BNE: + case BLEZ: // POP06 bgeuc/bleuc, blezalc, bgezalc + case BGTZ: // POP07 bltuc/bgtuc, bgtzalc, bltzalc + case BEQL: + case BNEL: + case BLEZL: // POP26 bgezc, blezc, bgec/blec + case BGTZL: // POP27 bgtzc, bltzc, bltc/bgtc + case BC: + case BALC: + case POP10: // beqzalc, bovc, beqc + case POP30: // bnezalc, bnvc, bnec + case POP66: // beqzc, jic + case POP76: // bnezc, jialc + return true; + case REGIMM: + switch (instr & kRtFieldMask) { + case BLTZ: + case BGEZ: + case BLTZAL: + case BGEZAL: + return true; + default: + return false; + } + break; + case SPECIAL: + switch (instr & kFunctionFieldMask) { + case JR: + case JALR: + return true; + default: + return false; + } + break; + case COP1: + switch (instr & kRsFieldMask) { + case BC1: + case BC1EQZ: + case BC1NEZ: + case BZ_V: + case BZ_B: + case BZ_H: + case BZ_W: + case BZ_D: + case BNZ_V: + case BNZ_B: + case BNZ_H: + case BNZ_W: + case BNZ_D: + return true; + break; + default: + return false; + } + break; + default: + return false; + } +} +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_ diff --git a/deps/v8/src/codegen/mips/cpu-mips.cc b/deps/v8/src/codegen/mips/cpu-mips.cc new file mode 100644 index 00000000000000..a7120d1c7a9855 --- /dev/null +++ b/deps/v8/src/codegen/mips/cpu-mips.cc @@ -0,0 +1,45 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// CPU specific code for arm independent of OS goes here. + +#include +#include + +#ifdef __mips +#include +#endif // #ifdef __mips + +#if V8_TARGET_ARCH_MIPS + +#include "src/codegen/cpu-features.h" + +namespace v8 { +namespace internal { + +void CpuFeatures::FlushICache(void* start, size_t size) { +#if !defined(USE_SIMULATOR) + // Nothing to do, flushing no instructions. + if (size == 0) { + return; + } + +#if defined(ANDROID) + // Bionic cacheflush can typically run in userland, avoiding kernel call. + char* end = reinterpret_cast(start) + size; + cacheflush(reinterpret_cast(start), reinterpret_cast(end), + 0); +#else // ANDROID + int res; + // See http://www.linux-mips.org/wiki/Cacheflush_Syscall. + res = syscall(__NR_cacheflush, start, size, ICACHE); + if (res) FATAL("Failed to flush the instruction cache"); +#endif // ANDROID +#endif // !USE_SIMULATOR. +} + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_MIPS diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h new file mode 100644 index 00000000000000..b9025b032c433e --- /dev/null +++ b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h @@ -0,0 +1,315 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_ +#define V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_ + +#if V8_TARGET_ARCH_MIPS + +#include "src/codegen/interface-descriptors.h" +#include "src/execution/frames.h" + +namespace v8 { +namespace internal { + +constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() { + auto registers = RegisterArray(a0, a1, a2, a3, t0); + STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams); + return registers; +} + +#if DEBUG +template +void StaticCallInterfaceDescriptor:: + VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) { + RegList allocatable_regs = data->allocatable_registers(); + if (argc >= 1) DCHECK(allocatable_regs.has(a0)); + if (argc >= 2) DCHECK(allocatable_regs.has(a1)); + if (argc >= 3) DCHECK(allocatable_regs.has(a2)); + if (argc >= 4) DCHECK(allocatable_regs.has(a3)); + // Additional arguments are passed on the stack. +} +#endif // DEBUG + +// static +constexpr auto WriteBarrierDescriptor::registers() { + return RegisterArray(a1, t1, t0, a0, a2, v0, a3, kContextRegister); +} + +// static +constexpr Register LoadDescriptor::ReceiverRegister() { return a1; } +// static +constexpr Register LoadDescriptor::NameRegister() { return a2; } +// static +constexpr Register LoadDescriptor::SlotRegister() { return a0; } + +// static +constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; } + +// static +constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() { + return a1; +} +// static +constexpr Register KeyedLoadBaselineDescriptor::NameRegister() { + return kInterpreterAccumulatorRegister; +} +// static +constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return a2; } + +// static +constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() { + return a3; +} + +// static +constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() { + return kInterpreterAccumulatorRegister; +} +// static +constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return a1; } +// static +constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return a2; } + +// static +constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() { + return a3; +} + +// static +constexpr Register +LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() { + return t0; +} + +// static +constexpr Register StoreDescriptor::ReceiverRegister() { return a1; } +// static +constexpr Register StoreDescriptor::NameRegister() { return a2; } +// static +constexpr Register StoreDescriptor::ValueRegister() { return a0; } +// static +constexpr Register StoreDescriptor::SlotRegister() { return t0; } + +// static +constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; } + +// static +constexpr Register StoreTransitionDescriptor::MapRegister() { return t1; } + +// static +constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; } +// static +constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; } + +// static +constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; } +// static +constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; } + +// static +constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { + return a2; +} + +// static +constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { + // TODO(v8:11421): Implement on this platform. 
+ return a3; +} + +// static +constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; } + +// static +constexpr auto TypeofDescriptor::registers() { return RegisterArray(a0); } + +// static +constexpr auto CallTrampolineDescriptor::registers() { + // a1: target + // a0: number of arguments + return RegisterArray(a1, a0); +} + +// static +constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() { + // a1 : the source + // a0 : the excluded property count + return RegisterArray(a1, a0); +} + +// static +constexpr auto +CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() { + // a1 : the source + // a0 : the excluded property count + // a2 : the excluded property base + return RegisterArray(a1, a0, a2); +} + +// static +constexpr auto CallVarargsDescriptor::registers() { + // a0 : number of arguments (on the stack) + // a1 : the target to call + // t0 : arguments list length (untagged) + // a2 : arguments list (FixedArray) + return RegisterArray(a1, a0, t0, a2); +} + +// static +constexpr auto CallForwardVarargsDescriptor::registers() { + // a1: the target to call + // a0: number of arguments + // a2: start index (to support rest parameters) + return RegisterArray(a1, a0, a2); +} + +// static +constexpr auto CallFunctionTemplateDescriptor::registers() { + // a1 : function template info + // a0 : number of arguments (on the stack) + return RegisterArray(a1, a0); +} + +// static +constexpr auto CallWithSpreadDescriptor::registers() { + // a0 : number of arguments (on the stack) + // a1 : the target to call + // a2 : the object to spread + return RegisterArray(a1, a0, a2); +} + +// static +constexpr auto CallWithArrayLikeDescriptor::registers() { + // a1 : the target to call + // a2 : the arguments list + return RegisterArray(a1, a2); +} + +// static +constexpr auto ConstructVarargsDescriptor::registers() { + // a0 : number of arguments (on the stack) + // a1 : the target to call + // a3 : the new target + // t0 : arguments list length (untagged) + // a2 : arguments list (FixedArray) + return RegisterArray(a1, a3, a0, t0, a2); +} + +// static +constexpr auto ConstructForwardVarargsDescriptor::registers() { + // a1: the target to call + // a3: new target + // a0: number of arguments + // a2: start index (to support rest parameters) + return RegisterArray(a1, a3, a0, a2); +} + +// static +constexpr auto ConstructWithSpreadDescriptor::registers() { + // a0 : number of arguments (on the stack) + // a1 : the target to call + // a3 : the new target + // a2 : the object to spread + return RegisterArray(a1, a3, a0, a2); +} + +// static +constexpr auto ConstructWithArrayLikeDescriptor::registers() { + // a1 : the target to call + // a3 : the new target + // a2 : the arguments list + return RegisterArray(a1, a3, a2); +} + +// static +constexpr auto ConstructStubDescriptor::registers() { + // a1: target + // a3: new target + // a0: number of arguments + // a2: allocation site or undefined + return RegisterArray(a1, a3, a0, a2); +} + +// static +constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); } + +// static +constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); } + +// static +constexpr auto Compare_BaselineDescriptor::registers() { + // a1: left operand + // a0: right operand + // a2: feedback slot + return RegisterArray(a1, a0, a2); +} + +// static +constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); } + +// static +constexpr auto BinaryOp_BaselineDescriptor::registers() 
{ + // TODO(v8:11421): Implement on this platform. + return RegisterArray(a1, a0, a2); +} + +// static +constexpr auto BinarySmiOp_BaselineDescriptor::registers() { + // TODO(v8:11421): Implement on this platform. + return RegisterArray(a0, a1, a2); +} + +// static +constexpr auto ApiCallbackDescriptor::registers() { + // a1 : kApiFunctionAddress + // a2 : kArgc + // a3 : kCallData + // a0 : kHolder + return RegisterArray(a1, a2, a3, a0); +} + +// static +constexpr auto InterpreterDispatchDescriptor::registers() { + return RegisterArray( + kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister, + kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister); +} + +// static +constexpr auto InterpreterPushArgsThenCallDescriptor::registers() { + // a0 : argument count + // a2 : address of first argument + // a1 : the target callable to be call + return RegisterArray(a0, a2, a1); +} + +// static +constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() { + // a0 : argument count + // t4 : address of the first argument + // a1 : constructor to call + // a3 : new target + // a2 : allocation site feedback if available, undefined otherwise + return RegisterArray(a0, t4, a1, a3, a2); +} + +// static +constexpr auto ResumeGeneratorDescriptor::registers() { + // v0 : the value to pass to the generator + // a1 : the JSGeneratorObject to resume + return RegisterArray(v0, a1); +} + +// static +constexpr auto RunMicrotasksEntryDescriptor::registers() { + return RegisterArray(a0, a1); +} + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_MIPS + +#endif // V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_ diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc new file mode 100644 index 00000000000000..b911fb9bfb108f --- /dev/null +++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc @@ -0,0 +1,5638 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include // For LONG_MIN, LONG_MAX. + +#if V8_TARGET_ARCH_MIPS + +#include "src/base/bits.h" +#include "src/base/division-by-constant.h" +#include "src/codegen/assembler-inl.h" +#include "src/codegen/callable.h" +#include "src/codegen/code-factory.h" +#include "src/codegen/external-reference-table.h" +#include "src/codegen/interface-descriptors-inl.h" +#include "src/codegen/macro-assembler.h" +#include "src/codegen/register-configuration.h" +#include "src/debug/debug.h" +#include "src/deoptimizer/deoptimizer.h" +#include "src/execution/frames-inl.h" +#include "src/heap/memory-chunk.h" +#include "src/init/bootstrapper.h" +#include "src/logging/counters.h" +#include "src/objects/heap-number.h" +#include "src/runtime/runtime.h" +#include "src/snapshot/snapshot.h" + +#if V8_ENABLE_WEBASSEMBLY +#include "src/wasm/wasm-code-manager.h" +#endif // V8_ENABLE_WEBASSEMBLY + +// Satisfy cpplint check, but don't include platform-specific header. It is +// included recursively via macro-assembler.h. 
+#if 0 +#include "src/codegen/mips/macro-assembler-mips.h" +#endif + +namespace v8 { +namespace internal { + +static inline bool IsZero(const Operand& rt) { + if (rt.is_reg()) { + return rt.rm() == zero_reg; + } else { + return rt.immediate() == 0; + } +} + +int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, + Register exclusion1, + Register exclusion2, + Register exclusion3) const { + int bytes = 0; + + RegList exclusions = {exclusion1, exclusion2, exclusion3}; + RegList list = kJSCallerSaved - exclusions; + bytes += list.Count() * kPointerSize; + + if (fp_mode == SaveFPRegsMode::kSave) { + bytes += kCallerSavedFPU.Count() * kDoubleSize; + } + + return bytes; +} + +int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, + Register exclusion2, Register exclusion3) { + ASM_CODE_COMMENT(this); + int bytes = 0; + + RegList exclusions = {exclusion1, exclusion2, exclusion3}; + RegList list = kJSCallerSaved - exclusions; + MultiPush(list); + bytes += list.Count() * kPointerSize; + + if (fp_mode == SaveFPRegsMode::kSave) { + MultiPushFPU(kCallerSavedFPU); + bytes += kCallerSavedFPU.Count() * kDoubleSize; + } + + return bytes; +} + +int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, + Register exclusion2, Register exclusion3) { + ASM_CODE_COMMENT(this); + int bytes = 0; + if (fp_mode == SaveFPRegsMode::kSave) { + MultiPopFPU(kCallerSavedFPU); + bytes += kCallerSavedFPU.Count() * kDoubleSize; + } + + RegList exclusions = {exclusion1, exclusion2, exclusion3}; + RegList list = kJSCallerSaved - exclusions; + MultiPop(list); + bytes += list.Count() * kPointerSize; + + return bytes; +} + +void TurboAssembler::LoadRoot(Register destination, RootIndex index) { + lw(destination, + MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); +} + +void TurboAssembler::LoadRoot(Register destination, RootIndex index, + Condition cond, Register src1, + const Operand& src2) { + Branch(2, NegateCondition(cond), src1, src2); + lw(destination, + MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); +} + +void TurboAssembler::PushCommonFrame(Register marker_reg) { + if (marker_reg.is_valid()) { + Push(ra, fp, marker_reg); + Addu(fp, sp, Operand(kPointerSize)); + } else { + Push(ra, fp); + mov(fp, sp); + } +} + +void TurboAssembler::PushStandardFrame(Register function_reg) { + int offset = -StandardFrameConstants::kContextOffset; + if (function_reg.is_valid()) { + Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister); + offset += 2 * kPointerSize; + } else { + Push(ra, fp, cp, kJavaScriptCallArgCountRegister); + offset += kPointerSize; + } + Addu(fp, sp, Operand(offset)); +} + +// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved) +// The register 'object' contains a heap object pointer. The heap object +// tag is shifted away. +void MacroAssembler::RecordWriteField(Register object, int offset, + Register value, Register dst, + RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + ASM_CODE_COMMENT(this); + DCHECK(!AreAliased(value, dst, t8, object)); + // First, check if a write barrier is even needed. The tests below + // catch stores of Smis. + Label done; + + // Skip barrier if writing a smi. + if (smi_check == SmiCheck::kInline) { + JumpIfSmi(value, &done); + } + + // Although the object register is tagged, the offset is relative to the start + // of the object, so offset must be a multiple of kPointerSize. 
+ DCHECK(IsAligned(offset, kPointerSize)); + + Addu(dst, object, Operand(offset - kHeapObjectTag)); + if (FLAG_debug_code) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label ok; + And(t8, dst, Operand(kPointerSize - 1)); + Branch(&ok, eq, t8, Operand(zero_reg)); + stop(); + bind(&ok); + } + + RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action, + SmiCheck::kOmit); + + bind(&done); + + // Clobber clobbered input registers when running with the debug-code flag + // turned on to provoke errors. + if (FLAG_debug_code) { + li(value, Operand(bit_cast(kZapValue + 4))); + li(dst, Operand(bit_cast(kZapValue + 8))); + } +} + +void TurboAssembler::MaybeSaveRegisters(RegList registers) { + if (registers.is_empty()) return; + MultiPush(registers); +} + +void TurboAssembler::MaybeRestoreRegisters(RegList registers) { + if (registers.is_empty()) return; + MultiPop(registers); +} + +void TurboAssembler::CallEphemeronKeyBarrier(Register object, + Register slot_address, + SaveFPRegsMode fp_mode) { + ASM_CODE_COMMENT(this); + DCHECK(!AreAliased(object, slot_address)); + RegList registers = + WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address); + MaybeSaveRegisters(registers); + + Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); + Register slot_address_parameter = + WriteBarrierDescriptor::SlotAddressRegister(); + + Push(object); + Push(slot_address); + Pop(slot_address_parameter); + Pop(object_parameter); + + Call(isolate()->builtins()->code_handle( + Builtins::GetEphemeronKeyBarrierStub(fp_mode)), + RelocInfo::CODE_TARGET); + MaybeRestoreRegisters(registers); +} + +void TurboAssembler::CallRecordWriteStubSaveRegisters( + Register object, Register slot_address, + RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, + StubCallMode mode) { + DCHECK(!AreAliased(object, slot_address)); + RegList registers = + WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address); + MaybeSaveRegisters(registers); + + Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); + Register slot_address_parameter = + WriteBarrierDescriptor::SlotAddressRegister(); + + Push(object); + Push(slot_address); + Pop(slot_address_parameter); + Pop(object_parameter); + + CallRecordWriteStub(object_parameter, slot_address_parameter, + remembered_set_action, fp_mode, mode); + + MaybeRestoreRegisters(registers); +} + +void TurboAssembler::CallRecordWriteStub( + Register object, Register slot_address, + RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, + StubCallMode mode) { + // Use CallRecordWriteStubSaveRegisters if the object and slot registers + // need to be caller saved. + DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object); + DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address); +#if V8_ENABLE_WEBASSEMBLY + if (mode == StubCallMode::kCallWasmRuntimeStub) { + auto wasm_target = + wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode); + Call(wasm_target, RelocInfo::WASM_STUB_CALL); +#else + if (false) { +#endif + } else { + Builtin builtin = + Builtins::GetRecordWriteStub(remembered_set_action, fp_mode); + if (options().inline_offheap_trampolines) { + // Inline the trampoline. 
+ RecordCommentForOffHeapTrampoline(builtin); + li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET)); + Call(t9); + RecordComment("]"); + } else { + Handle code_target = isolate()->builtins()->code_handle(builtin); + Call(code_target, RelocInfo::CODE_TARGET); + } + } +} + +// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved) +// The register 'object' contains a heap object pointer. The heap object +// tag is shifted away. +void MacroAssembler::RecordWrite(Register object, Register address, + Register value, RAStatus ra_status, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + DCHECK(!AreAliased(object, address, value, t8)); + DCHECK(!AreAliased(object, address, value, t9)); + + if (FLAG_debug_code) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(!AreAliased(object, value, scratch)); + lw(scratch, MemOperand(address)); + Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch, + Operand(value)); + } + + if ((remembered_set_action == RememberedSetAction::kOmit && + !FLAG_incremental_marking) || + FLAG_disable_write_barriers) { + return; + } + + // First, check if a write barrier is even needed. The tests below + // catch stores of smis and stores into the young generation. + Label done; + + if (smi_check == SmiCheck::kInline) { + DCHECK_EQ(0, kSmiTag); + JumpIfSmi(value, &done); + } + + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); + CheckPageFlag(object, + value, // Used as scratch. + MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done); + + // Record the actual write. + if (ra_status == kRAHasNotBeenSaved) { + push(ra); + } + + Register slot_address = WriteBarrierDescriptor::SlotAddressRegister(); + DCHECK(!AreAliased(object, slot_address, value)); + mov(slot_address, address); + CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode); + + if (ra_status == kRAHasNotBeenSaved) { + pop(ra); + } + + bind(&done); + + // Clobber clobbered registers when running with the debug-code flag + // turned on to provoke errors. + if (FLAG_debug_code) { + li(address, Operand(bit_cast(kZapValue + 12))); + li(value, Operand(bit_cast(kZapValue + 16))); + li(slot_address, Operand(bit_cast(kZapValue + 20))); + } +} + +// --------------------------------------------------------------------------- +// Instruction macros. + +void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + addu(rd, rs, rt.rm()); + } else { + if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) { + addiu(rd, rs, rt.immediate()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + addu(rd, rs, scratch); + } + } +} + +void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + subu(rd, rs, rt.rm()); + } else { + if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) { + addiu(rd, rs, -rt.immediate()); // No subiu instr, use addiu(x, y, -imm). + } else if (!(-rt.immediate() & kHiMask) && + !MustUseReg(rt.rmode())) { // Use load + // -imm and addu for cases where loading -imm generates one instruction. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, -rt.immediate()); + addu(rd, rs, scratch); + } else { + // li handles the relocation. 
+ UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + subu(rd, rs, scratch); + } + } +} + +void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (IsMipsArchVariant(kLoongson)) { + mult(rs, rt.rm()); + mflo(rd); + } else { + mul(rd, rs, rt.rm()); + } + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + if (IsMipsArchVariant(kLoongson)) { + mult(rs, scratch); + mflo(rd); + } else { + mul(rd, rs, scratch); + } + } +} + +void TurboAssembler::Mul(Register rd_hi, Register rd_lo, Register rs, + const Operand& rt) { + if (rt.is_reg()) { + if (!IsMipsArchVariant(kMips32r6)) { + mult(rs, rt.rm()); + mflo(rd_lo); + mfhi(rd_hi); + } else { + if (rd_lo == rs) { + DCHECK(rd_hi != rs); + DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm()); + muh(rd_hi, rs, rt.rm()); + mul(rd_lo, rs, rt.rm()); + } else { + DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm()); + mul(rd_lo, rs, rt.rm()); + muh(rd_hi, rs, rt.rm()); + } + } + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + if (!IsMipsArchVariant(kMips32r6)) { + mult(rs, scratch); + mflo(rd_lo); + mfhi(rd_hi); + } else { + if (rd_lo == rs) { + DCHECK(rd_hi != rs); + DCHECK(rd_hi != scratch && rd_lo != scratch); + muh(rd_hi, rs, scratch); + mul(rd_lo, rs, scratch); + } else { + DCHECK(rd_hi != scratch && rd_lo != scratch); + mul(rd_lo, rs, scratch); + muh(rd_hi, rs, scratch); + } + } + } +} + +void TurboAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs, + const Operand& rt) { + Register reg = no_reg; + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + if (rt.is_reg()) { + reg = rt.rm(); + } else { + DCHECK(rs != scratch); + reg = scratch; + li(reg, rt); + } + + if (!IsMipsArchVariant(kMips32r6)) { + multu(rs, reg); + mflo(rd_lo); + mfhi(rd_hi); + } else { + if (rd_lo == rs) { + DCHECK(rd_hi != rs); + DCHECK(rd_hi != reg && rd_lo != reg); + muhu(rd_hi, rs, reg); + mulu(rd_lo, rs, reg); + } else { + DCHECK(rd_hi != reg && rd_lo != reg); + mulu(rd_lo, rs, reg); + muhu(rd_hi, rs, reg); + } + } +} + +void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (!IsMipsArchVariant(kMips32r6)) { + mult(rs, rt.rm()); + mfhi(rd); + } else { + muh(rd, rs, rt.rm()); + } + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + if (!IsMipsArchVariant(kMips32r6)) { + mult(rs, scratch); + mfhi(rd); + } else { + muh(rd, rs, scratch); + } + } +} + +void TurboAssembler::Mult(Register rs, const Operand& rt) { + if (rt.is_reg()) { + mult(rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + mult(rs, scratch); + } +} + +void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (!IsMipsArchVariant(kMips32r6)) { + multu(rs, rt.rm()); + mfhi(rd); + } else { + muhu(rd, rs, rt.rm()); + } + } else { + // li handles the relocation. 
+ UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + if (!IsMipsArchVariant(kMips32r6)) { + multu(rs, scratch); + mfhi(rd); + } else { + muhu(rd, rs, scratch); + } + } +} + +void TurboAssembler::Multu(Register rs, const Operand& rt) { + if (rt.is_reg()) { + multu(rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + multu(rs, scratch); + } +} + +void TurboAssembler::Div(Register rs, const Operand& rt) { + if (rt.is_reg()) { + div(rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + div(rs, scratch); + } +} + +void TurboAssembler::Div(Register rem, Register res, Register rs, + const Operand& rt) { + if (rt.is_reg()) { + if (!IsMipsArchVariant(kMips32r6)) { + div(rs, rt.rm()); + mflo(res); + mfhi(rem); + } else { + div(res, rs, rt.rm()); + mod(rem, rs, rt.rm()); + } + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + if (!IsMipsArchVariant(kMips32r6)) { + div(rs, scratch); + mflo(res); + mfhi(rem); + } else { + div(res, rs, scratch); + mod(rem, rs, scratch); + } + } +} + +void TurboAssembler::Div(Register res, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (!IsMipsArchVariant(kMips32r6)) { + div(rs, rt.rm()); + mflo(res); + } else { + div(res, rs, rt.rm()); + } + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + if (!IsMipsArchVariant(kMips32r6)) { + div(rs, scratch); + mflo(res); + } else { + div(res, rs, scratch); + } + } +} + +void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (!IsMipsArchVariant(kMips32r6)) { + div(rs, rt.rm()); + mfhi(rd); + } else { + mod(rd, rs, rt.rm()); + } + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + if (!IsMipsArchVariant(kMips32r6)) { + div(rs, scratch); + mfhi(rd); + } else { + mod(rd, rs, scratch); + } + } +} + +void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (!IsMipsArchVariant(kMips32r6)) { + divu(rs, rt.rm()); + mfhi(rd); + } else { + modu(rd, rs, rt.rm()); + } + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + if (!IsMipsArchVariant(kMips32r6)) { + divu(rs, scratch); + mfhi(rd); + } else { + modu(rd, rs, scratch); + } + } +} + +void TurboAssembler::Divu(Register rs, const Operand& rt) { + if (rt.is_reg()) { + divu(rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + divu(rs, scratch); + } +} + +void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (!IsMipsArchVariant(kMips32r6)) { + divu(rs, rt.rm()); + mflo(res); + } else { + divu(res, rs, rt.rm()); + } + } else { + // li handles the relocation. 
+ UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + if (!IsMipsArchVariant(kMips32r6)) { + divu(rs, scratch); + mflo(res); + } else { + divu(res, rs, scratch); + } + } +} + +void TurboAssembler::And(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + and_(rd, rs, rt.rm()); + } else { + if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) { + andi(rd, rs, rt.immediate()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + and_(rd, rs, scratch); + } + } +} + +void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + or_(rd, rs, rt.rm()); + } else { + if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) { + ori(rd, rs, rt.immediate()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + or_(rd, rs, scratch); + } + } +} + +void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + xor_(rd, rs, rt.rm()); + } else { + if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) { + xori(rd, rs, rt.immediate()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + xor_(rd, rs, scratch); + } + } +} + +void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + nor(rd, rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + nor(rd, rs, scratch); + } +} + +void TurboAssembler::Neg(Register rs, const Operand& rt) { + subu(rs, zero_reg, rt.rm()); +} + +void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + slt(rd, rs, rt.rm()); + } else { + if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) { + slti(rd, rs, rt.immediate()); + } else { + // li handles the relocation. + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + Register scratch = rd == at ? t8 : temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + slt(rd, rs, scratch); + } + } +} + +void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + sltu(rd, rs, rt.rm()); + } else { + const uint32_t int16_min = std::numeric_limits::min(); + if (is_uint15(rt.immediate()) && !MustUseReg(rt.rmode())) { + // Imm range is: [0, 32767]. + sltiu(rd, rs, rt.immediate()); + } else if (is_uint15(rt.immediate() - int16_min) && + !MustUseReg(rt.rmode())) { + // Imm range is: [max_unsigned-32767,max_unsigned]. + sltiu(rd, rs, static_cast(rt.immediate())); + } else { + // li handles the relocation. + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + Register scratch = rd == at ? t8 : temps.Acquire(); + DCHECK(rs != scratch); + li(scratch, rt); + sltu(rd, rs, scratch); + } + } +} + +void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + slt(rd, rt.rm(), rs); + } else { + // li handles the relocation. + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.hasAvailable() ? 
temps.Acquire() : t8; + DCHECK(rs != scratch); + li(scratch, rt); + slt(rd, scratch, rs); + } + xori(rd, rd, 1); +} + +void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + sltu(rd, rt.rm(), rs); + } else { + // li handles the relocation. + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; + DCHECK(rs != scratch); + li(scratch, rt); + sltu(rd, scratch, rs); + } + xori(rd, rd, 1); +} + +void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) { + Slt(rd, rs, rt); + xori(rd, rd, 1); +} + +void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) { + Sltu(rd, rs, rt); + xori(rd, rd, 1); +} + +void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + slt(rd, rt.rm(), rs); + } else { + // li handles the relocation. + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; + DCHECK(rs != scratch); + li(scratch, rt); + slt(rd, scratch, rs); + } +} + +void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + sltu(rd, rt.rm(), rs); + } else { + // li handles the relocation. + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; + DCHECK(rs != scratch); + li(scratch, rt); + sltu(rd, scratch, rs); + } +} + +void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + if (rt.is_reg()) { + rotrv(rd, rs, rt.rm()); + } else { + rotr(rd, rs, rt.immediate() & 0x1F); + } + } else { + if (rt.is_reg()) { + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; + subu(scratch, zero_reg, rt.rm()); + sllv(scratch, rs, scratch); + srlv(rd, rs, rt.rm()); + or_(rd, rd, scratch); + } else { + if (rt.immediate() == 0) { + srl(rd, rs, 0); + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + srl(scratch, rs, rt.immediate() & 0x1F); + sll(rd, rs, (0x20 - (rt.immediate() & 0x1F)) & 0x1F); + or_(rd, rd, scratch); + } + } + } +} + +void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { + if (IsMipsArchVariant(kLoongson)) { + lw(zero_reg, rs); + } else { + pref(hint, rs); + } +} + +void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa, + Register scratch) { + DCHECK(sa >= 1 && sa <= 31); + if (IsMipsArchVariant(kMips32r6) && sa <= 4) { + lsa(rd, rt, rs, sa - 1); + } else { + Register tmp = rd == rt ? 
scratch : rd; + DCHECK(tmp != rt); + sll(tmp, rs, sa); + Addu(rd, rt, tmp); + } +} + +void TurboAssembler::Bovc(Register rs, Register rt, Label* L) { + if (is_trampoline_emitted()) { + Label skip; + bnvc(rs, rt, &skip); + BranchLong(L, PROTECT); + bind(&skip); + } else { + bovc(rs, rt, L); + } +} + +void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) { + if (is_trampoline_emitted()) { + Label skip; + bovc(rs, rt, &skip); + BranchLong(L, PROTECT); + bind(&skip); + } else { + bnvc(rs, rt, L); + } +} + +// ------------Pseudo-instructions------------- + +// Word Swap Byte +void TurboAssembler::ByteSwapSigned(Register dest, Register src, + int operand_size) { + DCHECK(operand_size == 2 || operand_size == 4); + + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + if (operand_size == 2) { + wsbh(dest, src); + seh(dest, dest); + } else { + wsbh(dest, src); + rotr(dest, dest, 16); + } + } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) { + if (operand_size == 2) { + DCHECK(src != at && dest != at); + srl(at, src, 8); + andi(at, at, 0xFF); + sll(dest, src, 8); + or_(dest, dest, at); + + // Sign-extension + sll(dest, dest, 16); + sra(dest, dest, 16); + } else { + BlockTrampolinePoolScope block_trampoline_pool(this); + Register tmp = at; + Register tmp2 = t8; + DCHECK(dest != tmp && dest != tmp2); + DCHECK(src != tmp && src != tmp2); + + andi(tmp2, src, 0xFF); + sll(tmp, tmp2, 24); + + andi(tmp2, src, 0xFF00); + sll(tmp2, tmp2, 8); + or_(tmp, tmp, tmp2); + + srl(tmp2, src, 8); + andi(tmp2, tmp2, 0xFF00); + or_(tmp, tmp, tmp2); + + srl(tmp2, src, 24); + or_(dest, tmp, tmp2); + } + } +} + +void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, + int operand_size) { + DCHECK_EQ(operand_size, 2); + + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + wsbh(dest, src); + andi(dest, dest, 0xFFFF); + } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) { + DCHECK(src != at && dest != at); + srl(at, src, 8); + andi(at, at, 0xFF); + sll(dest, src, 8); + or_(dest, dest, at); + + // Zero-extension + andi(dest, dest, 0xFFFF); + } +} + +void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { + DCHECK(rd != at); + DCHECK(rs.rm() != at); + if (IsMipsArchVariant(kMips32r6)) { + lw(rd, rs); + } else { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kLoongson)); + DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3); + MemOperand source = rs; + // Adjust offset for two accesses and check if offset + 3 fits into int16_t. + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); + if (rd != source.rm()) { + lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset)); + lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset)); + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + lwr(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset)); + lwl(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset)); + mov(rd, scratch); + } + } +} + +void TurboAssembler::Usw(Register rd, const MemOperand& rs) { + DCHECK(rd != at); + DCHECK(rs.rm() != at); + DCHECK(rd != rs.rm()); + if (IsMipsArchVariant(kMips32r6)) { + sw(rd, rs); + } else { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kLoongson)); + DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3); + MemOperand source = rs; + // Adjust offset for two accesses and check if offset + 3 fits into int16_t. 
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); + swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset)); + swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset)); + } +} + +void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { + DCHECK(rd != at); + DCHECK(rs.rm() != at); + if (IsMipsArchVariant(kMips32r6)) { + lh(rd, rs); + } else { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kLoongson)); + MemOperand source = rs; + // Adjust offset for two accesses and check if offset + 1 fits into int16_t. + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + if (source.rm() == scratch) { +#if defined(V8_TARGET_LITTLE_ENDIAN) + lb(rd, MemOperand(source.rm(), source.offset() + 1)); + lbu(scratch, source); +#elif defined(V8_TARGET_BIG_ENDIAN) + lb(rd, source); + lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); +#endif + } else { +#if defined(V8_TARGET_LITTLE_ENDIAN) + lbu(scratch, source); + lb(rd, MemOperand(source.rm(), source.offset() + 1)); +#elif defined(V8_TARGET_BIG_ENDIAN) + lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); + lb(rd, source); +#endif + } + sll(rd, rd, 8); + or_(rd, rd, scratch); + } +} + +void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { + DCHECK(rd != at); + DCHECK(rs.rm() != at); + if (IsMipsArchVariant(kMips32r6)) { + lhu(rd, rs); + } else { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kLoongson)); + MemOperand source = rs; + // Adjust offset for two accesses and check if offset + 1 fits into int16_t. + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + if (source.rm() == scratch) { +#if defined(V8_TARGET_LITTLE_ENDIAN) + lbu(rd, MemOperand(source.rm(), source.offset() + 1)); + lbu(scratch, source); +#elif defined(V8_TARGET_BIG_ENDIAN) + lbu(rd, source); + lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); +#endif + } else { +#if defined(V8_TARGET_LITTLE_ENDIAN) + lbu(scratch, source); + lbu(rd, MemOperand(source.rm(), source.offset() + 1)); +#elif defined(V8_TARGET_BIG_ENDIAN) + lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); + lbu(rd, source); +#endif + } + sll(rd, rd, 8); + or_(rd, rd, scratch); + } +} + +void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { + DCHECK(rd != at); + DCHECK(rs.rm() != at); + DCHECK(rs.rm() != scratch); + DCHECK(scratch != at); + if (IsMipsArchVariant(kMips32r6)) { + sh(rd, rs); + } else { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kLoongson)); + MemOperand source = rs; + // Adjust offset for two accesses and check if offset + 1 fits into int16_t. 
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); + + if (scratch != rd) { + mov(scratch, rd); + } + +#if defined(V8_TARGET_LITTLE_ENDIAN) + sb(scratch, source); + srl(scratch, scratch, 8); + sb(scratch, MemOperand(source.rm(), source.offset() + 1)); +#elif defined(V8_TARGET_BIG_ENDIAN) + sb(scratch, MemOperand(source.rm(), source.offset() + 1)); + srl(scratch, scratch, 8); + sb(scratch, source); +#endif + } +} + +void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, + Register scratch) { + if (IsMipsArchVariant(kMips32r6)) { + lwc1(fd, rs); + } else { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kLoongson)); + Ulw(scratch, rs); + mtc1(scratch, fd); + } +} + +void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs, + Register scratch) { + if (IsMipsArchVariant(kMips32r6)) { + swc1(fd, rs); + } else { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kLoongson)); + mfc1(scratch, fd); + Usw(scratch, rs); + } +} + +void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs, + Register scratch) { + DCHECK(scratch != at); + if (IsMipsArchVariant(kMips32r6)) { + Ldc1(fd, rs); + } else { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kLoongson)); + Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); + mtc1(scratch, fd); + Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); + Mthc1(scratch, fd); + } +} + +void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs, + Register scratch) { + DCHECK(scratch != at); + if (IsMipsArchVariant(kMips32r6)) { + Sdc1(fd, rs); + } else { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kLoongson)); + mfc1(scratch, fd); + Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); + Mfhc1(scratch, fd); + Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); + } +} + +void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) { + // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit + // load to two 32-bit loads. + { + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4); + MemOperand tmp = src; + AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES); + lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset)); + if (IsFp32Mode()) { // fp32 mode. + FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1); + lwc1(nextfpreg, + MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset)); + } else { + DCHECK(IsFp64Mode() || IsFpxxMode()); + // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(src.rm() != scratch); + lw(scratch, + MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset)); + Mthc1(scratch, fd); + } + } + CheckTrampolinePoolQuick(1); +} + +void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) { + // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit + // store to two 32-bit stores. 
+ { + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4); + MemOperand tmp = src; + AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES); + swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset)); + if (IsFp32Mode()) { // fp32 mode. + FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1); + swc1(nextfpreg, + MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset)); + } else { + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK(IsFp64Mode() || IsFpxxMode()); + // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + DCHECK(src.rm() != t8); + Mfhc1(t8, fd); + sw(t8, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset)); + } + } + CheckTrampolinePoolQuick(1); +} + +void TurboAssembler::Lw(Register rd, const MemOperand& rs) { + MemOperand source = rs; + AdjustBaseAndOffset(&source); + lw(rd, source); +} + +void TurboAssembler::Sw(Register rd, const MemOperand& rs) { + MemOperand dest = rs; + AdjustBaseAndOffset(&dest); + sw(rd, dest); +} + +void TurboAssembler::Ll(Register rd, const MemOperand& rs) { + bool is_one_instruction = IsMipsArchVariant(kMips32r6) + ? is_int9(rs.offset()) + : is_int16(rs.offset()); + if (is_one_instruction) { + ll(rd, rs); + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, rs.offset()); + addu(scratch, scratch, rs.rm()); + ll(rd, MemOperand(scratch, 0)); + } +} + +void TurboAssembler::Sc(Register rd, const MemOperand& rs) { + bool is_one_instruction = IsMipsArchVariant(kMips32r6) + ? is_int9(rs.offset()) + : is_int16(rs.offset()); + if (is_one_instruction) { + sc(rd, rs); + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, rs.offset()); + addu(scratch, scratch, rs.rm()); + sc(rd, MemOperand(scratch, 0)); + } +} + +void TurboAssembler::li(Register dst, Handle value, LiFlags mode) { + // TODO(jgruber,v8:8887): Also consider a root-relative load when generating + // non-isolate-independent code. In many cases it might be cheaper than + // embedding the relocatable value. + if (root_array_available_ && options().isolate_independent_code) { + IndirectLoadConstant(dst, value); + return; + } + li(dst, Operand(value), mode); +} + +void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) { + // TODO(jgruber,v8:8887): Also consider a root-relative load when generating + // non-isolate-independent code. In many cases it might be cheaper than + // embedding the relocatable value. + if (root_array_available_ && options().isolate_independent_code) { + IndirectLoadExternalReference(dst, value); + return; + } + li(dst, Operand(value), mode); +} + +void TurboAssembler::li(Register dst, const StringConstantBase* string, + LiFlags mode) { + li(dst, Operand::EmbeddedStringConstant(string), mode); +} + +void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { + DCHECK(!j.is_reg()); + BlockTrampolinePoolScope block_trampoline_pool(this); + if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { + // Normal load of an immediate value which does not need Relocation Info. 
+ if (is_int16(j.immediate())) { + addiu(rd, zero_reg, j.immediate()); + } else if (!(j.immediate() & kHiMask)) { + ori(rd, zero_reg, j.immediate()); + } else { + lui(rd, (j.immediate() >> kLuiShift) & kImm16Mask); + if (j.immediate() & kImm16Mask) { + ori(rd, rd, (j.immediate() & kImm16Mask)); + } + } + } else { + int32_t immediate; + if (j.IsHeapObjectRequest()) { + RequestHeapObject(j.heap_object_request()); + immediate = 0; + } else { + immediate = j.immediate(); + } + + if (MustUseReg(j.rmode())) { + RecordRelocInfo(j.rmode(), immediate); + } + // We always need the same number of instructions as we may need to patch + // this code to load another value which may need 2 instructions to load. + + lui(rd, (immediate >> kLuiShift) & kImm16Mask); + ori(rd, rd, (immediate & kImm16Mask)); + } +} + +void TurboAssembler::MultiPush(RegList regs) { + int16_t num_to_push = regs.Count(); + int16_t stack_offset = num_to_push * kPointerSize; + + Subu(sp, sp, Operand(stack_offset)); + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { + if ((regs.bits() & (1 << i)) != 0) { + stack_offset -= kPointerSize; + sw(ToRegister(i), MemOperand(sp, stack_offset)); + } + } +} + +void TurboAssembler::MultiPop(RegList regs) { + int16_t stack_offset = 0; + + for (int16_t i = 0; i < kNumRegisters; i++) { + if ((regs.bits() & (1 << i)) != 0) { + lw(ToRegister(i), MemOperand(sp, stack_offset)); + stack_offset += kPointerSize; + } + } + addiu(sp, sp, stack_offset); +} + +void TurboAssembler::MultiPushFPU(DoubleRegList regs) { + int16_t num_to_push = regs.Count(); + int16_t stack_offset = num_to_push * kDoubleSize; + + Subu(sp, sp, Operand(stack_offset)); + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { + if ((regs.bits() & (1 << i)) != 0) { + stack_offset -= kDoubleSize; + Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); + } + } +} + +void TurboAssembler::MultiPopFPU(DoubleRegList regs) { + int16_t stack_offset = 0; + + for (int16_t i = 0; i < kNumRegisters; i++) { + if ((regs.bits() & (1 << i)) != 0) { + Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); + stack_offset += kDoubleSize; + } + } + addiu(sp, sp, stack_offset); +} + +void TurboAssembler::AddPair(Register dst_low, Register dst_high, + Register left_low, Register left_high, + Register right_low, Register right_high, + Register scratch1, Register scratch2) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch3 = t8; + Addu(scratch1, left_low, right_low); + Sltu(scratch3, scratch1, left_low); + Addu(scratch2, left_high, right_high); + Addu(dst_high, scratch2, scratch3); + Move(dst_low, scratch1); +} + +void TurboAssembler::AddPair(Register dst_low, Register dst_high, + Register left_low, Register left_high, int32_t imm, + Register scratch1, Register scratch2) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch3 = t8; + li(dst_low, Operand(imm)); + sra(dst_high, dst_low, 31); + Addu(scratch1, left_low, dst_low); + Sltu(scratch3, scratch1, left_low); + Addu(scratch2, left_high, dst_high); + Addu(dst_high, scratch2, scratch3); + Move(dst_low, scratch1); +} + +void TurboAssembler::SubPair(Register dst_low, Register dst_high, + Register left_low, Register left_high, + Register right_low, Register right_high, + Register scratch1, Register scratch2) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch3 = t8; + Sltu(scratch3, left_low, right_low); + Subu(scratch1, left_low, right_low); + Subu(scratch2, left_high, right_high); + Subu(dst_high, scratch2, scratch3); + 
Move(dst_low, scratch1); +} + +void TurboAssembler::AndPair(Register dst_low, Register dst_high, + Register left_low, Register left_high, + Register right_low, Register right_high) { + And(dst_low, left_low, right_low); + And(dst_high, left_high, right_high); +} + +void TurboAssembler::OrPair(Register dst_low, Register dst_high, + Register left_low, Register left_high, + Register right_low, Register right_high) { + Or(dst_low, left_low, right_low); + Or(dst_high, left_high, right_high); +} +void TurboAssembler::XorPair(Register dst_low, Register dst_high, + Register left_low, Register left_high, + Register right_low, Register right_high) { + Xor(dst_low, left_low, right_low); + Xor(dst_high, left_high, right_high); +} + +void TurboAssembler::MulPair(Register dst_low, Register dst_high, + Register left_low, Register left_high, + Register right_low, Register right_high, + Register scratch1, Register scratch2) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch3 = t8; + Mulu(scratch2, scratch1, left_low, right_low); + Mul(scratch3, left_low, right_high); + Addu(scratch2, scratch2, scratch3); + Mul(scratch3, left_high, right_low); + Addu(dst_high, scratch2, scratch3); + Move(dst_low, scratch1); +} + +void TurboAssembler::ShlPair(Register dst_low, Register dst_high, + Register src_low, Register src_high, + Register shift, Register scratch1, + Register scratch2) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label done; + Register scratch3 = t8; + And(scratch3, shift, 0x3F); + sllv(dst_low, src_low, scratch3); + Nor(scratch2, zero_reg, scratch3); + srl(scratch1, src_low, 1); + srlv(scratch1, scratch1, scratch2); + sllv(dst_high, src_high, scratch3); + Or(dst_high, dst_high, scratch1); + And(scratch1, scratch3, 32); + if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { + Branch(&done, eq, scratch1, Operand(zero_reg)); + mov(dst_high, dst_low); + mov(dst_low, zero_reg); + } else { + movn(dst_high, dst_low, scratch1); + movn(dst_low, zero_reg, scratch1); + } + bind(&done); +} + +void TurboAssembler::ShlPair(Register dst_low, Register dst_high, + Register src_low, Register src_high, + uint32_t shift, Register scratch) { + DCHECK_NE(dst_low, src_low); + DCHECK_NE(dst_high, src_low); + shift = shift & 0x3F; + if (shift == 0) { + mov(dst_high, src_high); + mov(dst_low, src_low); + } else if (shift < 32) { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + DCHECK_NE(dst_high, src_high); + srl(dst_high, src_low, 32 - shift); + Ins(dst_high, src_high, shift, 32 - shift); + sll(dst_low, src_low, shift); + } else { + sll(dst_high, src_high, shift); + sll(dst_low, src_low, shift); + srl(scratch, src_low, 32 - shift); + Or(dst_high, dst_high, scratch); + } + } else if (shift == 32) { + mov(dst_low, zero_reg); + mov(dst_high, src_low); + } else { + shift = shift - 32; + mov(dst_low, zero_reg); + sll(dst_high, src_low, shift); + } +} + +void TurboAssembler::ShrPair(Register dst_low, Register dst_high, + Register src_low, Register src_high, + Register shift, Register scratch1, + Register scratch2) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label done; + Register scratch3 = t8; + And(scratch3, shift, 0x3F); + srlv(dst_high, src_high, scratch3); + Nor(scratch2, zero_reg, scratch3); + sll(scratch1, src_high, 1); + sllv(scratch1, scratch1, scratch2); + srlv(dst_low, src_low, scratch3); + Or(dst_low, dst_low, scratch1); + And(scratch1, scratch3, 32); + if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { + 
Branch(&done, eq, scratch1, Operand(zero_reg)); + mov(dst_low, dst_high); + mov(dst_high, zero_reg); + } else { + movn(dst_low, dst_high, scratch1); + movn(dst_high, zero_reg, scratch1); + } + bind(&done); +} + +void TurboAssembler::ShrPair(Register dst_low, Register dst_high, + Register src_low, Register src_high, + uint32_t shift, Register scratch) { + DCHECK_NE(dst_low, src_high); + DCHECK_NE(dst_high, src_high); + shift = shift & 0x3F; + if (shift == 0) { + mov(dst_low, src_low); + mov(dst_high, src_high); + } else if (shift < 32) { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + srl(dst_low, src_low, shift); + Ins(dst_low, src_high, 32 - shift, shift); + srl(dst_high, src_high, shift); + } else { + srl(dst_low, src_low, shift); + srl(dst_high, src_high, shift); + shift = 32 - shift; + sll(scratch, src_high, shift); + Or(dst_low, dst_low, scratch); + } + } else if (shift == 32) { + mov(dst_high, zero_reg); + mov(dst_low, src_high); + } else { + shift = shift - 32; + mov(dst_high, zero_reg); + srl(dst_low, src_high, shift); + } +} + +void TurboAssembler::SarPair(Register dst_low, Register dst_high, + Register src_low, Register src_high, + Register shift, Register scratch1, + Register scratch2) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label done; + Register scratch3 = t8; + And(scratch3, shift, 0x3F); + srav(dst_high, src_high, scratch3); + Nor(scratch2, zero_reg, scratch3); + sll(scratch1, src_high, 1); + sllv(scratch1, scratch1, scratch2); + srlv(dst_low, src_low, scratch3); + Or(dst_low, dst_low, scratch1); + And(scratch1, scratch3, 32); + Branch(&done, eq, scratch1, Operand(zero_reg)); + mov(dst_low, dst_high); + sra(dst_high, dst_high, 31); + bind(&done); +} + +void TurboAssembler::SarPair(Register dst_low, Register dst_high, + Register src_low, Register src_high, + uint32_t shift, Register scratch) { + DCHECK_NE(dst_low, src_high); + DCHECK_NE(dst_high, src_high); + shift = shift & 0x3F; + if (shift == 0) { + mov(dst_low, src_low); + mov(dst_high, src_high); + } else if (shift < 32) { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + srl(dst_low, src_low, shift); + Ins(dst_low, src_high, 32 - shift, shift); + sra(dst_high, src_high, shift); + } else { + srl(dst_low, src_low, shift); + sra(dst_high, src_high, shift); + shift = 32 - shift; + sll(scratch, src_high, shift); + Or(dst_low, dst_low, scratch); + } + } else if (shift == 32) { + sra(dst_high, src_high, 31); + mov(dst_low, src_high); + } else { + shift = shift - 32; + sra(dst_high, src_high, 31); + sra(dst_low, src_high, shift); + } +} + +void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos, + uint16_t size) { + DCHECK_LT(pos, 32); + DCHECK_LT(pos + size, 33); + + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + ext_(rt, rs, pos, size); + } else { + // Move rs to rt and shift it left then right to get the + // desired bitfield on the right side and zeroes on the left. + int shift_left = 32 - (pos + size); + sll(rt, rs, shift_left); // Acts as a move if shift_left == 0. 
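+ // The right shift below by (32 - size) leaves the field zero-extended in the low bits.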
+ + int shift_right = 32 - size; + if (shift_right > 0) { + srl(rt, rt, shift_right); + } + } +} + +void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos, + uint16_t size) { + DCHECK_LT(pos, 32); + DCHECK_LE(pos + size, 32); + DCHECK_NE(size, 0); + + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + ins_(rt, rs, pos, size); + } else { + DCHECK(rt != t8 && rs != t8); + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Subu(scratch, zero_reg, Operand(1)); + srl(scratch, scratch, 32 - size); + and_(t8, rs, scratch); + sll(t8, t8, pos); + sll(scratch, scratch, pos); + nor(scratch, scratch, zero_reg); + and_(scratch, rt, scratch); + or_(rt, t8, scratch); + } +} + +void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, + int size, bool sign_extend) { + srav(dest, source, pos); + Ext(dest, dest, 0, size); + if (size == 8) { + if (sign_extend) { + Seb(dest, dest); + } + } else if (size == 16) { + if (sign_extend) { + Seh(dest, dest); + } + } else { + UNREACHABLE(); + } +} + +void TurboAssembler::InsertBits(Register dest, Register source, Register pos, + int size) { + Ror(dest, dest, pos); + Ins(dest, source, 0, size); + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Subu(scratch, zero_reg, pos); + Ror(dest, dest, scratch); + } +} + +void TurboAssembler::Seb(Register rd, Register rt) { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + seb(rd, rt); + } else { + DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)); + sll(rd, rt, 24); + sra(rd, rd, 24); + } +} + +void TurboAssembler::Seh(Register rd, Register rt) { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + seh(rd, rt); + } else { + DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)); + sll(rd, rt, 16); + sra(rd, rd, 16); + } +} + +void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { + if (IsMipsArchVariant(kMips32r6)) { + // r6 neg_s changes the sign for NaN-like operands as well. + neg_s(fd, fs); + } else { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kLoongson)); + BlockTrampolinePoolScope block_trampoline_pool(this); + Label is_nan, done; + Register scratch1 = t8; + Register scratch2 = t9; + CompareIsNanF32(fs, fs); + BranchTrueShortF(&is_nan); + Branch(USE_DELAY_SLOT, &done); + // For NaN input, neg_s will return the same NaN value, + // while the sign has to be changed separately. + neg_s(fd, fs); // In delay slot. + bind(&is_nan); + mfc1(scratch1, fs); + li(scratch2, kBinary32SignMask); + Xor(scratch1, scratch1, scratch2); + mtc1(scratch1, fd); + bind(&done); + } +} + +void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { + if (IsMipsArchVariant(kMips32r6)) { + // r6 neg_d changes the sign for NaN-like operands as well. + neg_d(fd, fs); + } else { + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kLoongson)); + BlockTrampolinePoolScope block_trampoline_pool(this); + Label is_nan, done; + Register scratch1 = t8; + Register scratch2 = t9; + CompareIsNanF64(fs, fs); + BranchTrueShortF(&is_nan); + Branch(USE_DELAY_SLOT, &done); + // For NaN input, neg_d will return the same NaN value, + // while the sign has to be changed separately. + neg_d(fd, fs); // In delay slot. 
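+ // The NaN path below flips the sign bit of the high word through the integer registers.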
+ bind(&is_nan); + Move(fd, fs); + Mfhc1(scratch1, fd); + li(scratch2, HeapNumber::kSignMask); + Xor(scratch1, scratch1, scratch2); + Mthc1(scratch1, fd); + bind(&done); + } +} + +void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs, + FPURegister scratch) { + // In FP64Mode we do conversion from long. + if (IsFp64Mode()) { + mtc1(rs, scratch); + Mthc1(zero_reg, scratch); + cvt_d_l(fd, scratch); + } else { + // Convert rs to a FP value in fd. + DCHECK(fd != scratch); + DCHECK(rs != at); + + Label msb_clear, conversion_done; + // For a value which is < 2^31, regard it as a signed positve word. + Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT); + mtc1(rs, fd); + { + UseScratchRegisterScope temps(this); + Register scratch1 = temps.Acquire(); + li(scratch1, 0x41F00000); // FP value: 2^32. + + // For unsigned inputs > 2^31, we convert to double as a signed int32, + // then add 2^32 to move it back to unsigned value in range 2^31..2^31-1. + mtc1(zero_reg, scratch); + Mthc1(scratch1, scratch); + } + + cvt_d_w(fd, fd); + + Branch(USE_DELAY_SLOT, &conversion_done); + add_d(fd, fd, scratch); + + bind(&msb_clear); + cvt_d_w(fd, fd); + + bind(&conversion_done); + } +} + +void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs, + FPURegister scratch) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Trunc_uw_d(t8, fs, scratch); + mtc1(t8, fd); +} + +void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs, + FPURegister scratch) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Trunc_uw_s(t8, fs, scratch); + mtc1(t8, fd); +} + +void TurboAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) { + if (IsMipsArchVariant(kLoongson) && fd == fs) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Mfhc1(t8, fs); + trunc_w_d(fd, fs); + Mthc1(t8, fs); + } else { + trunc_w_d(fd, fs); + } +} + +void TurboAssembler::Round_w_d(FPURegister fd, FPURegister fs) { + if (IsMipsArchVariant(kLoongson) && fd == fs) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Mfhc1(t8, fs); + round_w_d(fd, fs); + Mthc1(t8, fs); + } else { + round_w_d(fd, fs); + } +} + +void TurboAssembler::Floor_w_d(FPURegister fd, FPURegister fs) { + if (IsMipsArchVariant(kLoongson) && fd == fs) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Mfhc1(t8, fs); + floor_w_d(fd, fs); + Mthc1(t8, fs); + } else { + floor_w_d(fd, fs); + } +} + +void TurboAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { + if (IsMipsArchVariant(kLoongson) && fd == fs) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Mfhc1(t8, fs); + ceil_w_d(fd, fs); + Mthc1(t8, fs); + } else { + ceil_w_d(fd, fs); + } +} + +void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, + FPURegister scratch) { + DCHECK(fs != scratch); + DCHECK(rd != at); + + { + // Load 2^31 into scratch as its float representation. + UseScratchRegisterScope temps(this); + Register scratch1 = temps.Acquire(); + li(scratch1, 0x41E00000); + mtc1(zero_reg, scratch); + Mthc1(scratch1, scratch); + } + // Test if scratch > fs. + // If fs < 2^31 we can convert it normally. + Label simple_convert; + CompareF64(OLT, fs, scratch); + BranchTrueShortF(&simple_convert); + + // First we subtract 2^31 from fs, then trunc it to rd + // and add 2^31 to rd. + sub_d(scratch, fs, scratch); + trunc_w_d(scratch, scratch); + mfc1(rd, scratch); + Or(rd, rd, 1 << 31); + + Label done; + Branch(&done); + // Simple conversion. 
+ bind(&simple_convert); + trunc_w_d(scratch, fs); + mfc1(rd, scratch); + + bind(&done); +} + +void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, + FPURegister scratch) { + DCHECK(fs != scratch); + DCHECK(rd != at); + + { + // Load 2^31 into scratch as its float representation. + UseScratchRegisterScope temps(this); + Register scratch1 = temps.Acquire(); + li(scratch1, 0x4F000000); + mtc1(scratch1, scratch); + } + // Test if scratch > fs. + // If fs < 2^31 we can convert it normally. + Label simple_convert; + CompareF32(OLT, fs, scratch); + BranchTrueShortF(&simple_convert); + + // First we subtract 2^31 from fs, then trunc it to rd + // and add 2^31 to rd. + sub_s(scratch, fs, scratch); + trunc_w_s(scratch, scratch); + mfc1(rd, scratch); + Or(rd, rd, 1 << 31); + + Label done; + Branch(&done); + // Simple conversion. + bind(&simple_convert); + trunc_w_s(scratch, fs); + mfc1(rd, scratch); + + bind(&done); +} + +template +void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src, + FPURoundingMode mode, RoundFunc round) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch = t8; + Register scratch2 = t9; + if (IsMipsArchVariant(kMips32r6)) { + cfc1(scratch, FCSR); + li(at, Operand(mode)); + ctc1(at, FCSR); + rint_d(dst, src); + ctc1(scratch, FCSR); + } else { + Label done; + Mfhc1(scratch, src); + Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits); + Branch(USE_DELAY_SLOT, &done, hs, at, + Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); + mov_d(dst, src); + round(this, dst, src); + Move(at, scratch2, dst); + or_(at, at, scratch2); + Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg)); + cvt_d_l(dst, dst); + srl(at, scratch, 31); + sll(at, at, 31); + Mthc1(at, dst); + bind(&done); + } +} + +void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) { + RoundDouble(dst, src, mode_floor, + [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { + tasm->floor_l_d(dst, src); + }); +} + +void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) { + RoundDouble(dst, src, mode_ceil, + [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { + tasm->ceil_l_d(dst, src); + }); +} + +void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) { + RoundDouble(dst, src, mode_trunc, + [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { + tasm->trunc_l_d(dst, src); + }); +} + +void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) { + RoundDouble(dst, src, mode_round, + [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { + tasm->round_l_d(dst, src); + }); +} + +template +void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, + FPURoundingMode mode, RoundFunc round) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch = t8; + if (IsMipsArchVariant(kMips32r6)) { + cfc1(scratch, FCSR); + li(at, Operand(mode)); + ctc1(at, FCSR); + rint_s(dst, src); + ctc1(scratch, FCSR); + } else { + int32_t kFloat32ExponentBias = 127; + int32_t kFloat32MantissaBits = 23; + int32_t kFloat32ExponentBits = 8; + Label done; + if (!IsDoubleZeroRegSet()) { + Move(kDoubleRegZero, 0.0); + } + mfc1(scratch, src); + Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits); + Branch(USE_DELAY_SLOT, &done, hs, at, + Operand(kFloat32ExponentBias + kFloat32MantissaBits)); + // Canonicalize the result. 
+ sub_s(dst, src, kDoubleRegZero); + round(this, dst, src); + mfc1(at, dst); + Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg)); + cvt_s_w(dst, dst); + srl(at, scratch, 31); + sll(at, at, 31); + mtc1(at, dst); + bind(&done); + } +} + +void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) { + RoundFloat(dst, src, mode_floor, + [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { + tasm->floor_w_s(dst, src); + }); +} + +void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) { + RoundFloat(dst, src, mode_ceil, + [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { + tasm->ceil_w_s(dst, src); + }); +} + +void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) { + RoundFloat(dst, src, mode_trunc, + [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { + tasm->trunc_w_s(dst, src); + }); +} + +void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) { + RoundFloat(dst, src, mode_round, + [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { + tasm->round_w_s(dst, src); + }); +} + +void TurboAssembler::Mthc1(Register rt, FPURegister fs) { + if (IsFp32Mode()) { + mtc1(rt, fs.high()); + } else { + DCHECK(IsFp64Mode() || IsFpxxMode()); + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + mthc1(rt, fs); + } +} + +void TurboAssembler::Mfhc1(Register rt, FPURegister fs) { + if (IsFp32Mode()) { + mfc1(rt, fs.high()); + } else { + DCHECK(IsFp64Mode() || IsFpxxMode()); + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + mfhc1(rt, fs); + } +} + +void TurboAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft, FPURegister scratch) { + if (IsMipsArchVariant(kMips32r2)) { + madd_s(fd, fr, fs, ft); + } else { + DCHECK(fr != scratch && fs != scratch && ft != scratch); + mul_s(scratch, fs, ft); + add_s(fd, fr, scratch); + } +} + +void TurboAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft, FPURegister scratch) { + if (IsMipsArchVariant(kMips32r2)) { + madd_d(fd, fr, fs, ft); + } else { + DCHECK(fr != scratch && fs != scratch && ft != scratch); + mul_d(scratch, fs, ft); + add_d(fd, fr, scratch); + } +} + +void TurboAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft, FPURegister scratch) { + if (IsMipsArchVariant(kMips32r2)) { + msub_s(fd, fr, fs, ft); + } else { + DCHECK(fr != scratch && fs != scratch && ft != scratch); + mul_s(scratch, fs, ft); + sub_s(fd, scratch, fr); + } +} + +void TurboAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft, FPURegister scratch) { + if (IsMipsArchVariant(kMips32r2)) { + msub_d(fd, fr, fs, ft); + } else { + DCHECK(fr != scratch && fs != scratch && ft != scratch); + mul_d(scratch, fs, ft); + sub_d(fd, scratch, fr); + } +} + +void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc, + FPURegister cmp1, FPURegister cmp2) { + if (IsMipsArchVariant(kMips32r6)) { + sizeField = sizeField == D ? 
L : W; + DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg); + cmp(cc, sizeField, kDoubleCompareReg, cmp1, cmp2); + } else { + c(cc, sizeField, cmp1, cmp2); + } +} + +void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, + FPURegister cmp2) { + CompareF(sizeField, UN, cmp1, cmp2); +} + +void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) { + if (IsMipsArchVariant(kMips32r6)) { + bc1nez(target, kDoubleCompareReg); + } else { + bc1t(target); + } + if (bd == PROTECT) { + nop(); + } +} + +void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) { + if (IsMipsArchVariant(kMips32r6)) { + bc1eqz(target, kDoubleCompareReg); + } else { + bc1f(target); + } + if (bd == PROTECT) { + nop(); + } +} + +void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) { + bool long_branch = + target->is_bound() ? !is_near(target) : is_trampoline_emitted(); + if (long_branch) { + Label skip; + BranchFalseShortF(&skip); + BranchLong(target, bd); + bind(&skip); + } else { + BranchTrueShortF(target, bd); + } +} + +void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) { + bool long_branch = + target->is_bound() ? !is_near(target) : is_trampoline_emitted(); + if (long_branch) { + Label skip; + BranchTrueShortF(&skip); + BranchLong(target, bd); + bind(&skip); + } else { + BranchFalseShortF(target, bd); + } +} + +void TurboAssembler::BranchMSA(Label* target, MSABranchDF df, + MSABranchCondition cond, MSARegister wt, + BranchDelaySlot bd) { + { + BlockTrampolinePoolScope block_trampoline_pool(this); + + if (target) { + bool long_branch = + target->is_bound() ? !is_near(target) : is_trampoline_emitted(); + if (long_branch) { + Label skip; + MSABranchCondition neg_cond = NegateMSABranchCondition(cond); + BranchShortMSA(df, &skip, neg_cond, wt, bd); + BranchLong(target, bd); + bind(&skip); + } else { + BranchShortMSA(df, target, cond, wt, bd); + } + } + } +} + +void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target, + MSABranchCondition cond, MSARegister wt, + BranchDelaySlot bd) { + if (IsMipsArchVariant(kMips32r6)) { + BlockTrampolinePoolScope block_trampoline_pool(this); + if (target) { + switch (cond) { + case all_not_zero: + switch (df) { + case MSA_BRANCH_D: + bnz_d(wt, target); + break; + case MSA_BRANCH_W: + bnz_w(wt, target); + break; + case MSA_BRANCH_H: + bnz_h(wt, target); + break; + case MSA_BRANCH_B: + default: + bnz_b(wt, target); + } + break; + case one_elem_not_zero: + bnz_v(wt, target); + break; + case one_elem_zero: + switch (df) { + case MSA_BRANCH_D: + bz_d(wt, target); + break; + case MSA_BRANCH_W: + bz_w(wt, target); + break; + case MSA_BRANCH_H: + bz_h(wt, target); + break; + case MSA_BRANCH_B: + default: + bz_b(wt, target); + } + break; + case all_zero: + bz_v(wt, target); + break; + default: + UNREACHABLE(); + } + } + } + if (bd == PROTECT) { + nop(); + } +} + +void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) { + if (IsFp32Mode()) { + mtc1(src_low, dst); + } else { + DCHECK(IsFp64Mode() || IsFpxxMode()); + DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(src_low != scratch); + mfhc1(scratch, dst); + mtc1(src_low, dst); + mthc1(scratch, dst); + } +} + +void TurboAssembler::Move(FPURegister dst, uint32_t src) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(static_cast(src))); + mtc1(scratch, dst); +} + +void 
TurboAssembler::Move(FPURegister dst, uint64_t src) { + // Handle special values first. + if (src == bit_cast(0.0) && has_double_zero_reg_set_) { + mov_d(dst, kDoubleRegZero); + } else if (src == bit_cast(-0.0) && has_double_zero_reg_set_) { + Neg_d(dst, kDoubleRegZero); + } else { + uint32_t lo = src & 0xFFFFFFFF; + uint32_t hi = src >> 32; + // Move the low part of the double into the lower of the corresponding FPU + // register of FPU register pair. + if (lo != 0) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(lo)); + mtc1(scratch, dst); + } else { + mtc1(zero_reg, dst); + } + // Move the high part of the double into the higher of the corresponding FPU + // register of FPU register pair. + if (hi != 0) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(hi)); + Mthc1(scratch, dst); + } else { + Mthc1(zero_reg, dst); + } + if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true; + } +} + +void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs, + const Operand& rt, Condition cond) { + BlockTrampolinePoolScope block_trampoline_pool(this); + switch (cond) { + case cc_always: + mov(rd, zero_reg); + break; + case eq: + if (rs == zero_reg) { + if (rt.is_reg()) { + LoadZeroIfConditionZero(rd, rt.rm()); + } else { + if (rt.immediate() == 0) { + mov(rd, zero_reg); + } else { + nop(); + } + } + } else if (IsZero(rt)) { + LoadZeroIfConditionZero(rd, rs); + } else { + Subu(t9, rs, rt); + LoadZeroIfConditionZero(rd, t9); + } + break; + case ne: + if (rs == zero_reg) { + if (rt.is_reg()) { + LoadZeroIfConditionNotZero(rd, rt.rm()); + } else { + if (rt.immediate() != 0) { + mov(rd, zero_reg); + } else { + nop(); + } + } + } else if (IsZero(rt)) { + LoadZeroIfConditionNotZero(rd, rs); + } else { + Subu(t9, rs, rt); + LoadZeroIfConditionNotZero(rd, t9); + } + break; + + // Signed comparison. + case greater: + Sgt(t9, rs, rt); + LoadZeroIfConditionNotZero(rd, t9); + break; + case greater_equal: + Sge(t9, rs, rt); + LoadZeroIfConditionNotZero(rd, t9); + // rs >= rt + break; + case less: + Slt(t9, rs, rt); + LoadZeroIfConditionNotZero(rd, t9); + // rs < rt + break; + case less_equal: + Sle(t9, rs, rt); + LoadZeroIfConditionNotZero(rd, t9); + // rs <= rt + break; + + // Unsigned comparison. 
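+ // As in the signed cases, t9 receives the comparison result and rd is cleared when it is non-zero.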
+ case Ugreater: + Sgtu(t9, rs, rt); + LoadZeroIfConditionNotZero(rd, t9); + // rs > rt + break; + + case Ugreater_equal: + Sgeu(t9, rs, rt); + LoadZeroIfConditionNotZero(rd, t9); + // rs >= rt + break; + case Uless: + Sltu(t9, rs, rt); + LoadZeroIfConditionNotZero(rd, t9); + // rs < rt + break; + case Uless_equal: + Sleu(t9, rs, rt); + LoadZeroIfConditionNotZero(rd, t9); + // rs <= rt + break; + default: + UNREACHABLE(); + } +} + +void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, + Register condition) { + if (IsMipsArchVariant(kMips32r6)) { + seleqz(dest, dest, condition); + } else { + Movn(dest, zero_reg, condition); + } +} + +void TurboAssembler::LoadZeroIfConditionZero(Register dest, + Register condition) { + if (IsMipsArchVariant(kMips32r6)) { + selnez(dest, dest, condition); + } else { + Movz(dest, zero_reg, condition); + } +} + +void TurboAssembler::LoadZeroIfFPUCondition(Register dest) { + if (IsMipsArchVariant(kMips32r6)) { + mfc1(kScratchReg, kDoubleCompareReg); + LoadZeroIfConditionNotZero(dest, kScratchReg); + } else { + Movt(dest, zero_reg); + } +} + +void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) { + if (IsMipsArchVariant(kMips32r6)) { + mfc1(kScratchReg, kDoubleCompareReg); + LoadZeroIfConditionZero(dest, kScratchReg); + } else { + Movf(dest, zero_reg); + } +} + +void TurboAssembler::Movz(Register rd, Register rs, Register rt) { + if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { + Label done; + Branch(&done, ne, rt, Operand(zero_reg)); + mov(rd, rs); + bind(&done); + } else { + movz(rd, rs, rt); + } +} + +void TurboAssembler::Movn(Register rd, Register rs, Register rt) { + if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { + Label done; + Branch(&done, eq, rt, Operand(zero_reg)); + mov(rd, rs); + bind(&done); + } else { + movn(rd, rs, rt); + } +} + +void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) { + if (IsMipsArchVariant(kLoongson)) { + BlockTrampolinePoolScope block_trampoline_pool(this); + // Tests an FP condition code and then conditionally move rs to rd. + // We do not currently use any FPU cc bit other than bit 0. + DCHECK_EQ(cc, 0); + DCHECK(rs != t8 && rd != t8); + Label done; + Register scratch = t8; + // For testing purposes we need to fetch content of the FCSR register and + // than test its cc (floating point condition code) bit (for cc = 0, it is + // 24. bit of the FCSR). + cfc1(scratch, FCSR); + // For the MIPS I, II and III architectures, the contents of scratch is + // UNPREDICTABLE for the instruction immediately following CFC1. + nop(); + srl(scratch, scratch, 16); + andi(scratch, scratch, 0x0080); + Branch(&done, eq, scratch, Operand(zero_reg)); + mov(rd, rs); + bind(&done); + } else { + movt(rd, rs, cc); + } +} + +void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) { + if (IsMipsArchVariant(kLoongson)) { + BlockTrampolinePoolScope block_trampoline_pool(this); + // Tests an FP condition code and then conditionally move rs to rd. + // We do not currently use any FPU cc bit other than bit 0. + DCHECK_EQ(cc, 0); + DCHECK(rs != t8 && rd != t8); + Label done; + Register scratch = t8; + // For testing purposes we need to fetch content of the FCSR register and + // than test its cc (floating point condition code) bit (for cc = 0, it is + // 24. bit of the FCSR). + cfc1(scratch, FCSR); + // For the MIPS I, II and III architectures, the contents of scratch is + // UNPREDICTABLE for the instruction immediately following CFC1. 
+ nop(); + srl(scratch, scratch, 16); + andi(scratch, scratch, 0x0080); + Branch(&done, ne, scratch, Operand(zero_reg)); + mov(rd, rs); + bind(&done); + } else { + movf(rd, rs, cc); + } +} + +void TurboAssembler::Clz(Register rd, Register rs) { + if (IsMipsArchVariant(kLoongson)) { + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK(rd != t8 && rd != t9 && rs != t8 && rs != t9); + Register mask = t8; + Register scratch = t9; + Label loop, end; + { + UseScratchRegisterScope temps(this); + Register scratch1 = temps.Acquire(); + mov(scratch1, rs); + mov(rd, zero_reg); + lui(mask, 0x8000); + bind(&loop); + and_(scratch, scratch1, mask); + } + Branch(&end, ne, scratch, Operand(zero_reg)); + addiu(rd, rd, 1); + Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT); + srl(mask, mask, 1); + bind(&end); + } else { + clz(rd, rs); + } +} + +void TurboAssembler::Ctz(Register rd, Register rs) { + if (IsMipsArchVariant(kMips32r6)) { + // We don't have an instruction to count the number of trailing zeroes. + // Start by flipping the bits end-for-end so we can count the number of + // leading zeroes instead. + Ror(rd, rs, 16); + wsbh(rd, rd); + bitswap(rd, rd); + Clz(rd, rd); + } else { + // Convert trailing zeroes to trailing ones, and bits to their left + // to zeroes. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Addu(scratch, rs, -1); + Xor(rd, scratch, rs); + And(rd, rd, scratch); + // Count number of leading zeroes. + Clz(rd, rd); + // Subtract number of leading zeroes from 32 to get number of trailing + // ones. Remember that the trailing ones were formerly trailing zeroes. + li(scratch, 32); + Subu(rd, scratch, rd); + } +} + +void TurboAssembler::Popcnt(Register rd, Register rs) { + ASM_CODE_COMMENT(this); + // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel + // + // A generalization of the best bit counting method to integers of + // bit-widths up to 128 (parameterized by type T) is this: + // + // v = v - ((v >> 1) & (T)~(T)0/3); // temp + // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp + // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp + // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count + // + // For comparison, for 32-bit quantities, this algorithm can be executed + // using 20 MIPS instructions (the calls to LoadConst32() generate two + // machine instructions each for the values being used in this algorithm). + // A(n unrolled) loop-based algorithm requires 25 instructions. + // + // For 64-bit quantities, this algorithm gets executed twice, (once + // for in_lo, and again for in_hi), but saves a few instructions + // because the mask values only have to be loaded once. Using this + // algorithm the count for a 64-bit operand can be performed in 29 + // instructions compared to a loop-based algorithm which requires 47 + // instructions. 
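+ // For example, rs == 0xF0F0F0F0 leaves 16 in rd.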
+ uint32_t B0 = 0x55555555; // (T)~(T)0/3 + uint32_t B1 = 0x33333333; // (T)~(T)0/15*3 + uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15 + uint32_t value = 0x01010101; // (T)~(T)0/255 + uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Register scratch2 = t8; + srl(scratch, rs, 1); + li(scratch2, B0); + And(scratch, scratch, scratch2); + Subu(scratch, rs, scratch); + li(scratch2, B1); + And(rd, scratch, scratch2); + srl(scratch, scratch, 2); + And(scratch, scratch, scratch2); + Addu(scratch, rd, scratch); + srl(rd, scratch, 4); + Addu(rd, rd, scratch); + li(scratch2, B2); + And(rd, rd, scratch2); + li(scratch, value); + Mul(rd, rd, scratch); + srl(rd, rd, shift); +} + +void TurboAssembler::TryInlineTruncateDoubleToI(Register result, + DoubleRegister double_input, + Label* done) { + BlockTrampolinePoolScope block_trampoline_pool(this); + DoubleRegister single_scratch = kScratchDoubleReg.low(); + Register scratch = t9; + + // Try a conversion to a signed integer. + trunc_w_d(single_scratch, double_input); + mfc1(result, single_scratch); + // Retrieve the FCSR. + cfc1(scratch, FCSR); + // Check for overflow and NaNs. + And(scratch, scratch, + kFCSROverflowCauseMask | kFCSRUnderflowCauseMask | + kFCSRInvalidOpCauseMask); + // If we had no exceptions we are done. + Branch(done, eq, scratch, Operand(zero_reg)); +} + +void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, + Register result, + DoubleRegister double_input, + StubCallMode stub_mode) { + Label done; + + TryInlineTruncateDoubleToI(result, double_input, &done); + + // If we fell through then inline version didn't succeed - call stub instead. + push(ra); + Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack. + Sdc1(double_input, MemOperand(sp, 0)); + +#if V8_ENABLE_WEBASSEMBLY + if (stub_mode == StubCallMode::kCallWasmRuntimeStub) { + Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL); +#else + // For balance. + if (false) { +#endif // V8_ENABLE_WEBASSEMBLY + } else { + Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET); + } + lw(result, MemOperand(sp, 0)); + + Addu(sp, sp, Operand(kDoubleSize)); + pop(ra); + + bind(&done); +} + +// Emulated condtional branches do not emit a nop in the branch delay slot. +// +// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. +#define BRANCH_ARGS_CHECK(cond, rs, rt) \ + DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \ + (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg))) + +void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) { + DCHECK(IsMipsArchVariant(kMips32r6) ? 
is_int26(offset) : is_int16(offset)); + BranchShort(offset, bdslot); +} + +void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot) { + bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); + DCHECK(is_near); + USE(is_near); +} + +void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) { + if (L->is_bound()) { + if (is_near_branch(L)) { + BranchShort(L, bdslot); + } else { + BranchLong(L, bdslot); + } + } else { + if (is_trampoline_emitted()) { + BranchLong(L, bdslot); + } else { + BranchShort(L, bdslot); + } + } +} + +void TurboAssembler::Branch(Label* L, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot) { + if (L->is_bound()) { + if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) { + if (cond != cc_always) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + BranchLong(L, bdslot); + bind(&skip); + } else { + BranchLong(L, bdslot); + } + } + } else { + if (is_trampoline_emitted()) { + if (cond != cc_always) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + BranchLong(L, bdslot); + bind(&skip); + } else { + BranchLong(L, bdslot); + } + } else { + BranchShort(L, cond, rs, rt, bdslot); + } + } +} + +void TurboAssembler::Branch(Label* L, Condition cond, Register rs, + RootIndex index, BranchDelaySlot bdslot) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + LoadRoot(scratch, index); + Branch(L, cond, rs, Operand(scratch), bdslot); +} + +void TurboAssembler::BranchShortHelper(int16_t offset, Label* L, + BranchDelaySlot bdslot) { + DCHECK(L == nullptr || offset == 0); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + b(offset); + + // Emit a nop in the branch delay slot if required. 
+ if (bdslot == PROTECT) nop(); +} + +void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) { + DCHECK(L == nullptr || offset == 0); + offset = GetOffset(offset, L, OffsetSize::kOffset26); + bc(offset); +} + +void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) { + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { + DCHECK(is_int26(offset)); + BranchShortHelperR6(offset, nullptr); + } else { + DCHECK(is_int16(offset)); + BranchShortHelper(offset, nullptr, bdslot); + } +} + +void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { + BranchShortHelperR6(0, L); + } else { + BranchShortHelper(0, L, bdslot); + } +} + +int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { + if (L) { + offset = branch_offset_helper(L, bits) >> 2; + } else { + DCHECK(is_intn(offset, bits)); + } + return offset; +} + +Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, + Register scratch) { + Register r2 = no_reg; + if (rt.is_reg()) { + r2 = rt.rm(); + } else { + r2 = scratch; + li(r2, rt); + } + + return r2; +} + +bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, + OffsetSize bits) { + if (!is_near(L, bits)) return false; + *offset = GetOffset(*offset, L, bits); + return true; +} + +bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt) { + if (!is_near(L, bits)) return false; + *scratch = GetRtAsRegisterHelper(rt, *scratch); + *offset = GetOffset(*offset, L, bits); + return true; +} + +bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, + Condition cond, Register rs, + const Operand& rt) { + DCHECK(L == nullptr || offset == 0); + UseScratchRegisterScope temps(this); + Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; + + // Be careful to always use shifted_branch_offset only just before the + // branch instruction, as the location will be remember for patching the + // target. + { + BlockTrampolinePoolScope block_trampoline_pool(this); + switch (cond) { + case cc_always: + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; + bc(offset); + break; + case eq: + if (rt.is_reg() && rs.code() == rt.rm().code()) { + // Pre R6 beq is used here to make the code patchable. Otherwise bc + // should be used which has no condition field so is not patchable. + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + beq(rs, scratch, offset); + nop(); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; + beqzc(rs, offset); + } else { + // We don't want any other register but scratch clobbered. + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + beqc(rs, scratch, offset); + } + break; + case ne: + if (rt.is_reg() && rs.code() == rt.rm().code()) { + // Pre R6 bne is used here to make the code patchable. Otherwise we + // should not generate any instruction. + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + bne(rs, scratch, offset); + nop(); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; + bnezc(rs, offset); + } else { + // We don't want any other register but scratch clobbered. + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + bnec(rs, scratch, offset); + } + break; + + // Signed comparison. 
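+ // In the cases below, a non-zero immediate rt is first materialized into scratch so the compact compare-and-branch forms can be used.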
+ case greater: + // rs > rt + if (rt.is_reg() && rs.code() == rt.rm().code()) { + break; // No code needs to be emitted. + } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + bltzc(scratch, offset); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; + bgtzc(rs, offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + DCHECK(rs != scratch); + bltc(scratch, rs, offset); + } + break; + case greater_equal: + // rs >= rt + if (rt.is_reg() && rs.code() == rt.rm().code()) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; + bc(offset); + } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + blezc(scratch, offset); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; + bgezc(rs, offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + DCHECK(rs != scratch); + bgec(rs, scratch, offset); + } + break; + case less: + // rs < rt + if (rt.is_reg() && rs.code() == rt.rm().code()) { + break; // No code needs to be emitted. + } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + bgtzc(scratch, offset); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; + bltzc(rs, offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + DCHECK(rs != scratch); + bltc(rs, scratch, offset); + } + break; + case less_equal: + // rs <= rt + if (rt.is_reg() && rs.code() == rt.rm().code()) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; + bc(offset); + } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + bgezc(scratch, offset); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; + blezc(rs, offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + DCHECK(rs != scratch); + bgec(scratch, rs, offset); + } + break; + + // Unsigned comparison. + case Ugreater: + // rs > rt + if (rt.is_reg() && rs.code() == rt.rm().code()) { + break; // No code needs to be emitted. 
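+ // (A register compared with itself can never be strictly greater, so the branch is never taken.)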
+ } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) + return false; + bnezc(scratch, offset); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; + bnezc(rs, offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + DCHECK(rs != scratch); + bltuc(scratch, rs, offset); + } + break; + case Ugreater_equal: + // rs >= rt + if (rt.is_reg() && rs.code() == rt.rm().code()) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; + bc(offset); + } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) + return false; + beqzc(scratch, offset); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; + bc(offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + DCHECK(rs != scratch); + bgeuc(rs, scratch, offset); + } + break; + case Uless: + // rs < rt + if (rt.is_reg() && rs.code() == rt.rm().code()) { + break; // No code needs to be emitted. + } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) + return false; + bnezc(scratch, offset); + } else if (IsZero(rt)) { + break; // No code needs to be emitted. + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + DCHECK(rs != scratch); + bltuc(rs, scratch, offset); + } + break; + case Uless_equal: + // rs <= rt + if (rt.is_reg() && rs.code() == rt.rm().code()) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; + bc(offset); + } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt)) + return false; + bc(offset); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; + beqzc(rs, offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + DCHECK(rs != scratch); + bgeuc(scratch, rs, offset); + } + break; + default: + UNREACHABLE(); + } + } + CheckTrampolinePoolQuick(1); + return true; +} + +bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond, + Register rs, const Operand& rt, + BranchDelaySlot bdslot) { + DCHECK(L == nullptr || offset == 0); + if (!is_near(L, OffsetSize::kOffset16)) return false; + + UseScratchRegisterScope temps(this); + Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; + int32_t offset32; + + // Be careful to always use shifted_branch_offset only just before the + // branch instruction, as the location will be remember for patching the + // target. + { + BlockTrampolinePoolScope block_trampoline_pool(this); + switch (cond) { + case cc_always: + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + b(offset32); + break; + case eq: + if (IsZero(rt)) { + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + beq(rs, zero_reg, offset32); + } else { + // We don't want any other register but scratch clobbered. + scratch = GetRtAsRegisterHelper(rt, scratch); + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + beq(rs, scratch, offset32); + } + break; + case ne: + if (IsZero(rt)) { + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + bne(rs, zero_reg, offset32); + } else { + // We don't want any other register but scratch clobbered. 
+ scratch = GetRtAsRegisterHelper(rt, scratch); + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + bne(rs, scratch, offset32); + } + break; + + // Signed comparison. + case greater: + if (IsZero(rt)) { + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + bgtz(rs, offset32); + } else { + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + bne(scratch, zero_reg, offset32); + } + break; + case greater_equal: + if (IsZero(rt)) { + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + bgez(rs, offset32); + } else { + Slt(scratch, rs, rt); + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + beq(scratch, zero_reg, offset32); + } + break; + case less: + if (IsZero(rt)) { + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + bltz(rs, offset32); + } else { + Slt(scratch, rs, rt); + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + bne(scratch, zero_reg, offset32); + } + break; + case less_equal: + if (IsZero(rt)) { + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + blez(rs, offset32); + } else { + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + beq(scratch, zero_reg, offset32); + } + break; + + // Unsigned comparison. + case Ugreater: + if (IsZero(rt)) { + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + bne(rs, zero_reg, offset32); + } else { + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + bne(scratch, zero_reg, offset32); + } + break; + case Ugreater_equal: + if (IsZero(rt)) { + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + b(offset32); + } else { + Sltu(scratch, rs, rt); + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + beq(scratch, zero_reg, offset32); + } + break; + case Uless: + if (IsZero(rt)) { + return true; // No code needs to be emitted. + } else { + Sltu(scratch, rs, rt); + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + bne(scratch, zero_reg, offset32); + } + break; + case Uless_equal: + if (IsZero(rt)) { + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + beq(rs, zero_reg, offset32); + } else { + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + offset32 = GetOffset(offset, L, OffsetSize::kOffset16); + beq(scratch, zero_reg, offset32); + } + break; + default: + UNREACHABLE(); + } + } + // Emit a nop in the branch delay slot if required. 
+ if (bdslot == PROTECT) nop(); + + return true; +} + +bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt, + BranchDelaySlot bdslot) { + BRANCH_ARGS_CHECK(cond, rs, rt); + if (!L) { + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { + DCHECK(is_int26(offset)); + return BranchShortHelperR6(offset, nullptr, cond, rs, rt); + } else { + DCHECK(is_int16(offset)); + return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot); + } + } else { + DCHECK_EQ(offset, 0); + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { + return BranchShortHelperR6(0, L, cond, rs, rt); + } else { + return BranchShortHelper(0, L, cond, rs, rt, bdslot); + } + } +} + +void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot) { + BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); +} + +void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot) { + BranchShortCheck(0, L, cond, rs, rt, bdslot); +} + +void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) { + BranchAndLinkShort(offset, bdslot); +} + +void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot) { + bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot); + DCHECK(is_near); + USE(is_near); +} + +void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { + if (L->is_bound()) { + if (is_near_branch(L)) { + BranchAndLinkShort(L, bdslot); + } else { + BranchAndLinkLong(L, bdslot); + } + } else { + if (is_trampoline_emitted()) { + BranchAndLinkLong(L, bdslot); + } else { + BranchAndLinkShort(L, bdslot); + } + } +} + +void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot) { + if (L->is_bound()) { + if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + BranchAndLinkLong(L, bdslot); + bind(&skip); + } + } else { + if (is_trampoline_emitted()) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + BranchAndLinkLong(L, bdslot); + bind(&skip); + } else { + BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot); + } + } +} + +void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, + BranchDelaySlot bdslot) { + DCHECK(L == nullptr || offset == 0); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bal(offset); + + // Emit a nop in the branch delay slot if required. 
+ if (bdslot == PROTECT) nop(); +} + +void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) { + DCHECK(L == nullptr || offset == 0); + offset = GetOffset(offset, L, OffsetSize::kOffset26); + balc(offset); +} + +void TurboAssembler::BranchAndLinkShort(int32_t offset, + BranchDelaySlot bdslot) { + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { + DCHECK(is_int26(offset)); + BranchAndLinkShortHelperR6(offset, nullptr); + } else { + DCHECK(is_int16(offset)); + BranchAndLinkShortHelper(offset, nullptr, bdslot); + } +} + +void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { + BranchAndLinkShortHelperR6(0, L); + } else { + BranchAndLinkShortHelper(0, L, bdslot); + } +} + +bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, + Condition cond, Register rs, + const Operand& rt) { + DCHECK(L == nullptr || offset == 0); + UseScratchRegisterScope temps(this); + Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; + OffsetSize bits = OffsetSize::kOffset16; + + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset)); + switch (cond) { + case cc_always: + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; + balc(offset); + break; + case eq: + if (!is_near(L, bits)) return false; + Subu(scratch, rs, rt); + offset = GetOffset(offset, L, bits); + beqzalc(scratch, offset); + break; + case ne: + if (!is_near(L, bits)) return false; + Subu(scratch, rs, rt); + offset = GetOffset(offset, L, bits); + bnezalc(scratch, offset); + break; + + // Signed comparison. + case greater: + // rs > rt + if (rs.code() == rt.rm().code()) { + break; // No code needs to be emitted. + } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + bltzalc(scratch, offset); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; + bgtzalc(rs, offset); + } else { + if (!is_near(L, bits)) return false; + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + offset = GetOffset(offset, L, bits); + bnezalc(scratch, offset); + } + break; + case greater_equal: + // rs >= rt + if (rs.code() == rt.rm().code()) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; + balc(offset); + } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + blezalc(scratch, offset); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; + bgezalc(rs, offset); + } else { + if (!is_near(L, bits)) return false; + Slt(scratch, rs, rt); + offset = GetOffset(offset, L, bits); + beqzalc(scratch, offset); + } + break; + case less: + // rs < rt + if (rs.code() == rt.rm().code()) { + break; // No code needs to be emitted. 
+ } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + bgtzalc(scratch, offset); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; + bltzalc(rs, offset); + } else { + if (!is_near(L, bits)) return false; + Slt(scratch, rs, rt); + offset = GetOffset(offset, L, bits); + bnezalc(scratch, offset); + } + break; + case less_equal: + // rs <= r2 + if (rs.code() == rt.rm().code()) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; + balc(offset); + } else if (rs == zero_reg) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) + return false; + bgezalc(scratch, offset); + } else if (IsZero(rt)) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; + blezalc(rs, offset); + } else { + if (!is_near(L, bits)) return false; + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + offset = GetOffset(offset, L, bits); + beqzalc(scratch, offset); + } + break; + + // Unsigned comparison. + case Ugreater: + // rs > r2 + if (!is_near(L, bits)) return false; + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + offset = GetOffset(offset, L, bits); + bnezalc(scratch, offset); + break; + case Ugreater_equal: + // rs >= r2 + if (!is_near(L, bits)) return false; + Sltu(scratch, rs, rt); + offset = GetOffset(offset, L, bits); + beqzalc(scratch, offset); + break; + case Uless: + // rs < r2 + if (!is_near(L, bits)) return false; + Sltu(scratch, rs, rt); + offset = GetOffset(offset, L, bits); + bnezalc(scratch, offset); + break; + case Uless_equal: + // rs <= r2 + if (!is_near(L, bits)) return false; + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + offset = GetOffset(offset, L, bits); + beqzalc(scratch, offset); + break; + default: + UNREACHABLE(); + } + return true; +} + +// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly +// with the slt instructions. We could use sub or add instead but we would miss +// overflow cases, so we keep slt and add an intermediate third instruction. +bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, + Condition cond, Register rs, + const Operand& rt, + BranchDelaySlot bdslot) { + DCHECK(L == nullptr || offset == 0); + if (!is_near(L, OffsetSize::kOffset16)) return false; + + Register scratch = t8; + BlockTrampolinePoolScope block_trampoline_pool(this); + + switch (cond) { + case cc_always: + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bal(offset); + break; + case eq: + bne(rs, GetRtAsRegisterHelper(rt, scratch), 2); + nop(); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bal(offset); + break; + case ne: + beq(rs, GetRtAsRegisterHelper(rt, scratch), 2); + nop(); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bal(offset); + break; + + // Signed comparison. 
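+ // In each case the addiu -1 maps the 0/1 result of Slt/Sltu to -1/0 so the sign bit can be tested with bgezal/bltzal.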
+ case greater: + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + addiu(scratch, scratch, -1); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bgezal(scratch, offset); + break; + case greater_equal: + Slt(scratch, rs, rt); + addiu(scratch, scratch, -1); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bltzal(scratch, offset); + break; + case less: + Slt(scratch, rs, rt); + addiu(scratch, scratch, -1); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bgezal(scratch, offset); + break; + case less_equal: + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + addiu(scratch, scratch, -1); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bltzal(scratch, offset); + break; + + // Unsigned comparison. + case Ugreater: + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + addiu(scratch, scratch, -1); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bgezal(scratch, offset); + break; + case Ugreater_equal: + Sltu(scratch, rs, rt); + addiu(scratch, scratch, -1); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bltzal(scratch, offset); + break; + case Uless: + Sltu(scratch, rs, rt); + addiu(scratch, scratch, -1); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bgezal(scratch, offset); + break; + case Uless_equal: + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); + addiu(scratch, scratch, -1); + offset = GetOffset(offset, L, OffsetSize::kOffset16); + bltzal(scratch, offset); + break; + + default: + UNREACHABLE(); + } + + // Emit a nop in the branch delay slot if required. + if (bdslot == PROTECT) nop(); + + return true; +} + +bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, + Condition cond, Register rs, + const Operand& rt, + BranchDelaySlot bdslot) { + BRANCH_ARGS_CHECK(cond, rs, rt); + + if (!L) { + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { + DCHECK(is_int26(offset)); + return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt); + } else { + DCHECK(is_int16(offset)); + return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot); + } + } else { + DCHECK_EQ(offset, 0); + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { + return BranchAndLinkShortHelperR6(0, L, cond, rs, rt); + } else { + return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot); + } + } +} + +void TurboAssembler::LoadFromConstantsTable(Register destination, + int constant_index) { + ASM_CODE_COMMENT(this); + DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); + LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); + lw(destination, + FieldMemOperand(destination, + FixedArray::kHeaderSize + constant_index * kPointerSize)); +} + +void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { + lw(destination, MemOperand(kRootRegister, offset)); +} + +void TurboAssembler::LoadRootRegisterOffset(Register destination, + intptr_t offset) { + if (offset == 0) { + Move(destination, kRootRegister); + } else { + Addu(destination, kRootRegister, offset); + } +} + +void TurboAssembler::Jump(Register target, int16_t offset, Condition cond, + Register rs, const Operand& rt, BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK(is_int16(offset)); + if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) { + if (cond == cc_always) { + jic(target, offset); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jic(target, offset); + } + } else { + if (offset != 0) { + Addu(target, target, 
offset); + } + if (cond == cc_always) { + jr(target); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jr(target); + } + // Emit a nop in the branch delay slot if required. + if (bd == PROTECT) nop(); + } +} + +void TurboAssembler::Jump(Register target, Register base, int16_t offset, + Condition cond, Register rs, const Operand& rt, + BranchDelaySlot bd) { + DCHECK(is_int16(offset)); + BlockTrampolinePoolScope block_trampoline_pool(this); + if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) { + if (cond == cc_always) { + jic(base, offset); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jic(base, offset); + } + } else { + if (offset != 0) { + Addu(target, base, offset); + } else { // Call through target + if (target != base) mov(target, base); + } + if (cond == cc_always) { + jr(target); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jr(target); + } + // Emit a nop in the branch delay slot if required. + if (bd == PROTECT) nop(); + } +} + +void TurboAssembler::Jump(Register target, const Operand& offset, + Condition cond, Register rs, const Operand& rt, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && + !is_int16(offset.immediate())) { + uint32_t aui_offset, jic_offset; + Assembler::UnpackTargetAddressUnsigned(offset.immediate(), &aui_offset, + &jic_offset); + RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate()); + aui(target, target, aui_offset); + if (cond == cc_always) { + jic(target, jic_offset); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jic(target, jic_offset); + } + } else { + if (offset.immediate() != 0) { + Addu(target, target, offset); + } + if (cond == cc_always) { + jr(target); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jr(target); + } + // Emit a nop in the branch delay slot if required. + if (bd == PROTECT) nop(); + } +} + +void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, + Condition cond, Register rs, const Operand& rt, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label skip; + if (cond != cc_always) { + Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt); + } + // The first instruction of 'li' may be placed in the delay slot. + // This is not an issue, t9 is expected to be clobbered anyway. 
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) { + uint32_t lui_offset, jic_offset; + UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset); + if (MustUseReg(rmode)) { + RecordRelocInfo(rmode, target); + } + lui(t9, lui_offset); + Jump(t9, jic_offset, al, zero_reg, Operand(zero_reg), bd); + } else { + li(t9, Operand(target, rmode)); + Jump(t9, 0, al, zero_reg, Operand(zero_reg), bd); + } + bind(&skip); +} + +void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, + Register rs, const Operand& rt, BranchDelaySlot bd) { + DCHECK(!RelocInfo::IsCodeTarget(rmode)); + Jump(static_cast(target), rmode, cond, rs, rt, bd); +} + +void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, + Condition cond, Register rs, const Operand& rt, + BranchDelaySlot bd) { + DCHECK(RelocInfo::IsCodeTarget(rmode)); + BlockTrampolinePoolScope block_trampoline_pool(this); + + Builtin builtin = Builtin::kNoBuiltinId; + bool target_is_isolate_independent_builtin = + isolate()->builtins()->IsBuiltinHandle(code, &builtin) && + Builtins::IsIsolateIndependent(builtin); + if (target_is_isolate_independent_builtin && + options().use_pc_relative_calls_and_jumps) { + int32_t code_target_index = AddCodeTarget(code); + Label skip; + BlockTrampolinePoolScope block_trampoline_pool(this); + if (cond != cc_always) { + // By using delay slot, we always execute first instruction of + // GenPcRelativeJump (which is or_(t8, ra, zero_reg)). + Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt); + } + GenPCRelativeJump(t8, t9, code_target_index, + RelocInfo::RELATIVE_CODE_TARGET, bd); + bind(&skip); + return; + } else if (root_array_available_ && options().isolate_independent_code) { + IndirectLoadConstant(t9, code); + Jump(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd); + return; + } else if (target_is_isolate_independent_builtin && + options().inline_offheap_trampolines) { + // Inline the trampoline. + RecordCommentForOffHeapTrampoline(builtin); + li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET)); + Jump(t9, 0, cond, rs, rt, bd); + RecordComment("]"); + return; + } + + Jump(static_cast(code.address()), rmode, cond, rs, rt, bd); +} + +void TurboAssembler::Jump(const ExternalReference& reference) { + li(t9, reference); + Jump(t9); +} + +void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, + unsigned higher_limit, + Label* on_in_range) { + ASM_CODE_COMMENT(this); + if (lower_limit != 0) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Subu(scratch, value, Operand(lower_limit)); + Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit)); + } else { + Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit)); + } +} + +// Note: To call gcc-compiled C code on mips, you must call through t9. 
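+ // Position-independent o32 callees recompute $gp from the value left in
+ // t9, so a call that arrives through any other register leaves gp wrong
+ // for the callee's GOT accesses. Roughly, a PIC prologue looks like this
+ // (sketch only, nothing of the sort is emitted here):
+ //   lui   gp, %hi(_gp_disp)
+ //   addiu gp, gp, %lo(_gp_disp)
+ //   addu  gp, gp, t9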
+void TurboAssembler::Call(Register target, int16_t offset, Condition cond, + Register rs, const Operand& rt, BranchDelaySlot bd) { + DCHECK(is_int16(offset)); + BlockTrampolinePoolScope block_trampoline_pool(this); + if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) { + if (cond == cc_always) { + jialc(target, offset); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jialc(target, offset); + } + } else { + if (offset != 0) { + Addu(target, target, offset); + } + if (cond == cc_always) { + jalr(target); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jalr(target); + } + // Emit a nop in the branch delay slot if required. + if (bd == PROTECT) nop(); + } + set_pc_for_safepoint(); +} + +// Note: To call gcc-compiled C code on mips, you must call through t9. +void TurboAssembler::Call(Register target, Register base, int16_t offset, + Condition cond, Register rs, const Operand& rt, + BranchDelaySlot bd) { + DCHECK(is_uint16(offset)); + BlockTrampolinePoolScope block_trampoline_pool(this); + if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) { + if (cond == cc_always) { + jialc(base, offset); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jialc(base, offset); + } + } else { + if (offset != 0) { + Addu(target, base, offset); + } else { // Call through target + if (target != base) mov(target, base); + } + if (cond == cc_always) { + jalr(target); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jalr(target); + } + // Emit a nop in the branch delay slot if required. + if (bd == PROTECT) nop(); + } + set_pc_for_safepoint(); +} + +void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, + Register rs, const Operand& rt, BranchDelaySlot bd) { + CheckBuffer(); + BlockTrampolinePoolScope block_trampoline_pool(this); + int32_t target_int = static_cast(target); + if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) { + uint32_t lui_offset, jialc_offset; + UnpackTargetAddressUnsigned(target_int, &lui_offset, &jialc_offset); + if (MustUseReg(rmode)) { + RecordRelocInfo(rmode, target_int); + } + lui(t9, lui_offset); + Call(t9, jialc_offset, cond, rs, rt, bd); + } else { + li(t9, Operand(target_int, rmode), CONSTANT_SIZE); + Call(t9, 0, cond, rs, rt, bd); + } +} + +void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, + Condition cond, Register rs, const Operand& rt, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + + Builtin builtin = Builtin::kNoBuiltinId; + bool target_is_isolate_independent_builtin = + isolate()->builtins()->IsBuiltinHandle(code, &builtin) && + Builtins::IsIsolateIndependent(builtin); + if (target_is_isolate_independent_builtin && + options().use_pc_relative_calls_and_jumps) { + int32_t code_target_index = AddCodeTarget(code); + Label skip; + BlockTrampolinePoolScope block_trampoline_pool(this); + if (cond != cc_always) { + Branch(PROTECT, &skip, NegateCondition(cond), rs, rt); + } + GenPCRelativeJumpAndLink(t8, code_target_index, + RelocInfo::RELATIVE_CODE_TARGET, bd); + bind(&skip); + return; + } else if (root_array_available_ && options().isolate_independent_code) { + IndirectLoadConstant(t9, code); + Call(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd); + return; + } else if (target_is_isolate_independent_builtin && + options().inline_offheap_trampolines) { + // Inline the trampoline. 
+ RecordCommentForOffHeapTrampoline(builtin); + li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET)); + Call(t9, 0, cond, rs, rt, bd); + RecordComment("]"); + return; + } + + DCHECK(RelocInfo::IsCodeTarget(rmode)); + DCHECK(code->IsExecutable()); + Call(code.address(), rmode, cond, rs, rt, bd); +} + +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { + ASM_CODE_COMMENT(this); + STATIC_ASSERT(kSystemPointerSize == 4); + STATIC_ASSERT(kSmiShiftSize == 0); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); + + // The builtin_index register contains the builtin index as a Smi. + SmiUntag(builtin_index, builtin_index); + Lsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2); + lw(builtin_index, + MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); +} +void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, + Register destination) { + Lw(destination, EntryFromBuiltinAsOperand(builtin)); +} +MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { + DCHECK(root_array_available()); + return MemOperand(kRootRegister, + IsolateData::BuiltinEntrySlotOffset(builtin)); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + ASM_CODE_COMMENT(this); + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); +} +void TurboAssembler::CallBuiltin(Builtin builtin) { + RecordCommentForOffHeapTrampoline(builtin); + Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET); + RecordComment("]"); +} + +void TurboAssembler::PatchAndJump(Address target) { + if (kArchVariant != kMips32r6) { + ASM_CODE_COMMENT(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + mov(scratch, ra); + bal(1); // jump to lw + nop(); // in the delay slot + lw(t9, MemOperand(ra, kInstrSize * 3)); // ra == pc_ + jr(t9); + mov(ra, scratch); // in delay slot + DCHECK_EQ(reinterpret_cast(pc_) % 8, 0); + *reinterpret_cast(pc_) = target; + pc_ += sizeof(uint32_t); + } else { + // TODO(mips r6): Implement. + UNIMPLEMENTED(); + } +} + +void TurboAssembler::StoreReturnAddressAndCall(Register target) { + ASM_CODE_COMMENT(this); + // This generates the final instruction sequence for calls to C functions + // once an exit frame has been constructed. + // + // Note that this assumes the caller code (i.e. the Code object currently + // being generated) is immovable or that the callee function cannot trigger + // GC, since the callee function will return to it. + + Assembler::BlockTrampolinePoolScope block_trampoline_pool(this); + static constexpr int kNumInstructionsToJump = 4; + Label find_ra; + // Adjust the value in ra to point to the correct return location, 2nd + // instruction past the real call into C code (the jalr(t9)), and push it. + // This is the return address of the exit frame. + if (kArchVariant >= kMips32r6) { + addiupc(ra, kNumInstructionsToJump + 1); + } else { + // This no-op-and-link sequence saves PC + 8 in ra register on pre-r6 MIPS + nal(); // nal has branch delay slot. + Addu(ra, ra, kNumInstructionsToJump * kInstrSize); + } + bind(&find_ra); + + // This spot was reserved in EnterExitFrame. + sw(ra, MemOperand(sp)); + // Stack space reservation moved to the branch delay slot below. + // Stack is still aligned. + + // Call the C routine. + mov(t9, target); // Function pointer to t9 to conform to ABI for PIC. + jalr(t9); + // Set up sp in the delay slot. + addiu(sp, sp, -kCArgsSlotsSize); + // Make sure the stored 'ra' points to this position. 
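+ // kNumInstructionsToJump == 4 counts exactly the instructions emitted
+ // between find_ra and the C call's return point: the sw(ra), the
+ // mov(t9, target), the jalr(t9) and the addiu in its delay slot. The value
+ // stored to the exit frame above therefore matches the address jalr itself
+ // places in ra, which the DCHECK below verifies.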
+ DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra)); +} + +void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt, + BranchDelaySlot bd) { + Jump(ra, 0, cond, rs, rt, bd); +} + +void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) { + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT && + (!L->is_bound() || is_near_r6(L))) { + BranchShortHelperR6(0, L); + } else { + // Generate position independent long branch. + BlockTrampolinePoolScope block_trampoline_pool(this); + int32_t imm32; + imm32 = branch_long_offset(L); + GenPCRelativeJump(t8, t9, imm32, RelocInfo::NO_INFO, bdslot); + } +} + +void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) { + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT && (is_int26(offset))) { + BranchShortHelperR6(offset, nullptr); + } else { + // Generate position independent long branch. + BlockTrampolinePoolScope block_trampoline_pool(this); + GenPCRelativeJump(t8, t9, offset, RelocInfo::NO_INFO, bdslot); + } +} + +void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) { + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT && + (!L->is_bound() || is_near_r6(L))) { + BranchAndLinkShortHelperR6(0, L); + } else { + // Generate position independent long branch and link. + BlockTrampolinePoolScope block_trampoline_pool(this); + int32_t imm32; + imm32 = branch_long_offset(L); + GenPCRelativeJumpAndLink(t8, imm32, RelocInfo::NO_INFO, bdslot); + } +} + +void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, + ArgumentsCountMode mode) { + switch (type) { + case kCountIsInteger: { + Lsa(sp, sp, count, kPointerSizeLog2); + break; + } + case kCountIsSmi: { + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + Lsa(sp, sp, count, kPointerSizeLog2 - kSmiTagSize, count); + break; + } + case kCountIsBytes: { + Addu(sp, sp, count); + break; + } + } + if (mode == kCountExcludesReceiver) { + Addu(sp, sp, kSystemPointerSize); + } +} + +void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, + Register receiver, + ArgumentsCountType type, + ArgumentsCountMode mode) { + DCHECK(!AreAliased(argc, receiver)); + if (mode == kCountExcludesReceiver) { + // Drop arguments without receiver and override old receiver. + DropArguments(argc, type, kCountIncludesReceiver); + sw(receiver, MemOperand(sp)); + } else { + DropArguments(argc, type, mode); + push(receiver); + } +} + +void TurboAssembler::DropAndRet(int drop) { + int32_t drop_size = drop * kSystemPointerSize; + DCHECK(is_int31(drop_size)); + + if (is_int16(drop_size)) { + Ret(USE_DELAY_SLOT); + addiu(sp, sp, drop_size); + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, drop_size); + Ret(USE_DELAY_SLOT); + addu(sp, sp, scratch); + } +} + +void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1, + const Operand& r2) { + // Both Drop and Ret need to be conditional. 
+ Label skip; + if (cond != cc_always) { + Branch(&skip, NegateCondition(cond), r1, r2); + } + + Drop(drop); + Ret(); + + if (cond != cc_always) { + bind(&skip); + } +} + +void TurboAssembler::Drop(int count, Condition cond, Register reg, + const Operand& op) { + if (count <= 0) { + return; + } + + Label skip; + + if (cond != al) { + Branch(&skip, NegateCondition(cond), reg, op); + } + + Addu(sp, sp, Operand(count * kPointerSize)); + + if (cond != al) { + bind(&skip); + } +} + +void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { + if (scratch == no_reg) { + Xor(reg1, reg1, Operand(reg2)); + Xor(reg2, reg2, Operand(reg1)); + Xor(reg1, reg1, Operand(reg2)); + } else { + mov(scratch, reg1); + mov(reg1, reg2); + mov(reg2, scratch); + } +} + +void TurboAssembler::Call(Label* target) { BranchAndLink(target); } + +void TurboAssembler::LoadAddress(Register dst, Label* target) { + uint32_t address = jump_address(target); + li(dst, address); +} + +void TurboAssembler::Push(Handle handle) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(handle)); + push(scratch); +} + +void TurboAssembler::Push(Smi smi) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(smi)); + push(scratch); +} + +void TurboAssembler::PushArray(Register array, Register size, Register scratch, + Register scratch2, PushArrayOrder order) { + DCHECK(!AreAliased(array, size, scratch, scratch2)); + Label loop, entry; + if (order == PushArrayOrder::kReverse) { + mov(scratch, zero_reg); + jmp(&entry); + bind(&loop); + Lsa(scratch2, array, scratch, kPointerSizeLog2); + Lw(scratch2, MemOperand(scratch2)); + push(scratch2); + Addu(scratch, scratch, Operand(1)); + bind(&entry); + Branch(&loop, less, scratch, Operand(size)); + } else { + mov(scratch, size); + jmp(&entry); + bind(&loop); + Lsa(scratch2, array, scratch, kPointerSizeLog2); + Lw(scratch2, MemOperand(scratch2)); + push(scratch2); + bind(&entry); + Addu(scratch, scratch, Operand(-1)); + Branch(&loop, greater_equal, scratch, Operand(zero_reg)); + } +} + +// --------------------------------------------------------------------------- +// Exception handling. + +void MacroAssembler::PushStackHandler() { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); + + Push(Smi::zero()); // Padding. + + // Link the current handler as the next handler. + li(t2, + ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate())); + lw(t1, MemOperand(t2)); + push(t1); + + // Set this new handler as the current one. + sw(sp, MemOperand(t2)); +} + +void MacroAssembler::PopStackHandler() { + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(a1); + Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, + ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate())); + sw(a1, MemOperand(scratch)); +} + +void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, + const DoubleRegister src) { + sub_d(dst, src, kDoubleRegZero); +} + +void TurboAssembler::MovFromFloatResult(DoubleRegister dst) { + if (IsMipsSoftFloatABI) { + if (kArchEndian == kLittle) { + Move(dst, v0, v1); + } else { + Move(dst, v1, v0); + } + } else { + Move(dst, f0); // Reg f0 is o32 ABI FP return value. 
+ } +} + +void TurboAssembler::MovFromFloatParameter(DoubleRegister dst) { + if (IsMipsSoftFloatABI) { + if (kArchEndian == kLittle) { + Move(dst, a0, a1); + } else { + Move(dst, a1, a0); + } + } else { + Move(dst, f12); // Reg f12 is o32 ABI FP first argument value. + } +} + +void TurboAssembler::MovToFloatParameter(DoubleRegister src) { + if (!IsMipsSoftFloatABI) { + Move(f12, src); + } else { + if (kArchEndian == kLittle) { + Move(a0, a1, src); + } else { + Move(a1, a0, src); + } + } +} + +void TurboAssembler::MovToFloatResult(DoubleRegister src) { + if (!IsMipsSoftFloatABI) { + Move(f0, src); + } else { + if (kArchEndian == kLittle) { + Move(v0, v1, src); + } else { + Move(v1, v0, src); + } + } +} + +void TurboAssembler::MovToFloatParameters(DoubleRegister src1, + DoubleRegister src2) { + if (!IsMipsSoftFloatABI) { + if (src2 == f12) { + DCHECK(src1 != f14); + Move(f14, src2); + Move(f12, src1); + } else { + Move(f12, src1); + Move(f14, src2); + } + } else { + if (kArchEndian == kLittle) { + Move(a0, a1, src1); + Move(a2, a3, src2); + } else { + Move(a1, a0, src1); + Move(a3, a2, src2); + } + } +} + +// ----------------------------------------------------------------------------- +// JavaScript invokes. + +void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { + ASM_CODE_COMMENT(this); + DCHECK(root_array_available()); + Isolate* isolate = this->isolate(); + ExternalReference limit = + kind == StackLimitKind::kRealStackLimit + ? ExternalReference::address_of_real_jslimit(isolate) + : ExternalReference::address_of_jslimit(isolate); + DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + + intptr_t offset = + TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + CHECK(is_int32(offset)); + Lw(destination, MemOperand(kRootRegister, static_cast(offset))); +} + +void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1, + Register scratch2, + Label* stack_overflow) { + ASM_CODE_COMMENT(this); + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + + LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit); + // Make scratch1 the space we have left. The stack might already be overflowed + // here which will cause scratch1 to become negative. + subu(scratch1, sp, scratch1); + // Check if the arguments will overflow the stack. + sll(scratch2, num_args, kPointerSizeLog2); + // Signed comparison. + Branch(stack_overflow, le, scratch1, Operand(scratch2)); +} + +void MacroAssembler::InvokePrologue(Register expected_parameter_count, + Register actual_parameter_count, + Label* done, InvokeType type) { + ASM_CODE_COMMENT(this); + Label regular_invoke; + + // a0: actual arguments count + // a1: function (passed through to callee) + // a2: expected arguments count + + DCHECK_EQ(actual_parameter_count, a0); + DCHECK_EQ(expected_parameter_count, a2); + + // If the expected parameter count is equal to the adaptor sentinel, no need + // to push undefined value as arguments. + if (kDontAdaptArgumentsSentinel != 0) { + Branch(®ular_invoke, eq, expected_parameter_count, + Operand(kDontAdaptArgumentsSentinel)); + } + + // If overapplication or if the actual argument count is equal to the + // formal parameter count, no need to push extra undefined values. 
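+ // If the branch below is not taken we are underapplying: after the
+ // subtraction, expected_parameter_count holds the number of missing
+ // arguments, the words already on the stack are slid down by that many
+ // slots, and the gap that opens up is filled with the undefined value.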
+ Subu(expected_parameter_count, expected_parameter_count, + actual_parameter_count); + Branch(®ular_invoke, le, expected_parameter_count, Operand(zero_reg)); + + Label stack_overflow; + StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow); + // Underapplication. Move the arguments already in the stack, including the + // receiver and the return address. + { + Label copy; + Register src = t3, dest = t4; + mov(src, sp); + sll(t0, expected_parameter_count, kSystemPointerSizeLog2); + Subu(sp, sp, Operand(t0)); + // Update stack pointer. + mov(dest, sp); + mov(t0, a0); + bind(©); + Lw(t1, MemOperand(src, 0)); + Sw(t1, MemOperand(dest, 0)); + Subu(t0, t0, Operand(1)); + Addu(src, src, Operand(kSystemPointerSize)); + Addu(dest, dest, Operand(kSystemPointerSize)); + Branch(©, gt, t0, Operand(zero_reg)); + } + + // Fill remaining expected arguments with undefined values. + LoadRoot(t0, RootIndex::kUndefinedValue); + { + Label loop; + bind(&loop); + Sw(t0, MemOperand(t4, 0)); + Subu(expected_parameter_count, expected_parameter_count, Operand(1)); + Addu(t4, t4, Operand(kSystemPointerSize)); + Branch(&loop, gt, expected_parameter_count, Operand(zero_reg)); + } + b(®ular_invoke); + nop(); + + bind(&stack_overflow); + { + FrameScope frame( + this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL); + CallRuntime(Runtime::kThrowStackOverflow); + break_(0xCC); + } + + bind(®ular_invoke); +} + +void MacroAssembler::CheckDebugHook(Register fun, Register new_target, + Register expected_parameter_count, + Register actual_parameter_count) { + Label skip_hook; + li(t0, ExternalReference::debug_hook_on_function_call_address(isolate())); + lb(t0, MemOperand(t0)); + Branch(&skip_hook, eq, t0, Operand(zero_reg)); + + { + // Load receiver to pass it later to DebugOnFunctionCall hook. + LoadReceiver(t0, actual_parameter_count); + + FrameScope frame( + this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL); + SmiTag(expected_parameter_count); + Push(expected_parameter_count); + + SmiTag(actual_parameter_count); + Push(actual_parameter_count); + + if (new_target.is_valid()) { + Push(new_target); + } + Push(fun); + Push(fun); + Push(t0); + CallRuntime(Runtime::kDebugOnFunctionCall); + Pop(fun); + if (new_target.is_valid()) { + Pop(new_target); + } + + Pop(actual_parameter_count); + SmiUntag(actual_parameter_count); + + Pop(expected_parameter_count); + SmiUntag(expected_parameter_count); + } + bind(&skip_hook); +} + +void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, + Register expected_parameter_count, + Register actual_parameter_count, + InvokeType type) { + // You can't call a function without a valid frame. + DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); + DCHECK_EQ(function, a1); + DCHECK_IMPLIES(new_target.is_valid(), new_target == a3); + + // On function call, call into the debugger if necessary. + CheckDebugHook(function, new_target, expected_parameter_count, + actual_parameter_count); + + // Clear the new.target register if not given. + if (!new_target.is_valid()) { + LoadRoot(a3, RootIndex::kUndefinedValue); + } + + Label done; + InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type); + // We call indirectly through the code field in the function to + // allow recompilation to take effect without changing any of the + // call sites. 
+ Register code = kJavaScriptCallCodeStartRegister; + lw(code, FieldMemOperand(function, JSFunction::kCodeOffset)); + switch (type) { + case InvokeType::kCall: + Addu(code, code, Code::kHeaderSize - kHeapObjectTag); + Call(code); + break; + case InvokeType::kJump: + Addu(code, code, Code::kHeaderSize - kHeapObjectTag); + Jump(code); + break; + } + + // Continue here if InvokePrologue does handle the invocation due to + // mismatched parameter counts. + bind(&done); +} + +void MacroAssembler::InvokeFunctionWithNewTarget( + Register function, Register new_target, Register actual_parameter_count, + InvokeType type) { + ASM_CODE_COMMENT(this); + // You can't call a function without a valid frame. + DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); + + // Contract with called JS functions requires that function is passed in a1. + DCHECK_EQ(function, a1); + Register expected_reg = a2; + Register temp_reg = t0; + + lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + lhu(expected_reg, + FieldMemOperand(temp_reg, + SharedFunctionInfo::kFormalParameterCountOffset)); + + InvokeFunctionCode(function, new_target, expected_reg, actual_parameter_count, + type); +} + +void MacroAssembler::InvokeFunction(Register function, + Register expected_parameter_count, + Register actual_parameter_count, + InvokeType type) { + ASM_CODE_COMMENT(this); + // You can't call a function without a valid frame. + DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); + + // Contract with called JS functions requires that function is passed in a1. + DCHECK_EQ(function, a1); + + // Get the function and setup the context. + lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + + InvokeFunctionCode(a1, no_reg, expected_parameter_count, + actual_parameter_count, type); +} + +// --------------------------------------------------------------------------- +// Support functions. + +void MacroAssembler::GetObjectType(Register object, Register map, + Register type_reg) { + LoadMap(map, object); + lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); +} + +void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg, + InstanceType lower_limit, + Register range) { + lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); + Subu(range, type_reg, Operand(lower_limit)); +} + +// ----------------------------------------------------------------------------- +// Runtime calls. 
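+ // The overflow helpers below detect signed overflow without branching.
+ // For addition, overflow happened iff the result's sign differs from the
+ // sign of both operands, i.e. iff the sign bit of
+ //   ((left + right) ^ left) & ((left + right) ^ right)
+ // is set; that is exactly what the xor_/and_ sequence in AddOverflow
+ // leaves in 'overflow' (negative means overflow). SubOverflow uses the
+ // mirrored form ((left - right) ^ left) & (left ^ right), and MulOverflow
+ // instead checks that the upper 32 bits of the 64-bit product equal the
+ // sign extension of the lower 32 bits. A C-style sketch of the addition
+ // test (illustration only, not part of the assembler API):
+ //   int32_t sum = (int32_t)((uint32_t)a + (uint32_t)b);
+ //   bool overflowed = ((sum ^ a) & (sum ^ b)) < 0;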
+ +void TurboAssembler::AddOverflow(Register dst, Register left, + const Operand& right, Register overflow) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register right_reg = no_reg; + Register scratch = t8; + if (!right.is_reg()) { + li(at, Operand(right)); + right_reg = at; + } else { + right_reg = right.rm(); + } + + DCHECK(left != scratch && right_reg != scratch && dst != scratch && + overflow != scratch); + DCHECK(overflow != left && overflow != right_reg); + + if (dst == left || dst == right_reg) { + addu(scratch, left, right_reg); + xor_(overflow, scratch, left); + xor_(at, scratch, right_reg); + and_(overflow, overflow, at); + mov(dst, scratch); + } else { + addu(dst, left, right_reg); + xor_(overflow, dst, left); + xor_(at, dst, right_reg); + and_(overflow, overflow, at); + } +} + +void TurboAssembler::SubOverflow(Register dst, Register left, + const Operand& right, Register overflow) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register right_reg = no_reg; + Register scratch = t8; + if (!right.is_reg()) { + li(at, Operand(right)); + right_reg = at; + } else { + right_reg = right.rm(); + } + + DCHECK(left != scratch && right_reg != scratch && dst != scratch && + overflow != scratch); + DCHECK(overflow != left && overflow != right_reg); + + if (dst == left || dst == right_reg) { + subu(scratch, left, right_reg); + xor_(overflow, left, scratch); + xor_(at, left, right_reg); + and_(overflow, overflow, at); + mov(dst, scratch); + } else { + subu(dst, left, right_reg); + xor_(overflow, left, dst); + xor_(at, left, right_reg); + and_(overflow, overflow, at); + } +} + +void TurboAssembler::MulOverflow(Register dst, Register left, + const Operand& right, Register overflow) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register right_reg = no_reg; + Register scratch = t8; + Register scratch2 = t9; + if (!right.is_reg()) { + li(at, Operand(right)); + right_reg = at; + } else { + right_reg = right.rm(); + } + + DCHECK(left != scratch && right_reg != scratch && dst != scratch && + overflow != scratch); + DCHECK(overflow != left && overflow != right_reg); + + if (dst == left || dst == right_reg) { + Mul(overflow, scratch2, left, right_reg); + sra(scratch, scratch2, 31); + xor_(overflow, overflow, scratch); + mov(dst, scratch2); + } else { + Mul(overflow, dst, left, right_reg); + sra(scratch, dst, 31); + xor_(overflow, overflow, scratch); + } +} + +void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, + SaveFPRegsMode save_doubles) { + ASM_CODE_COMMENT(this); + // All parameters are on the stack. v0 has the return value after call. + + // If the expected number of arguments of the runtime function is + // constant, we check that the actual number of arguments match the + // expectation. + CHECK(f->nargs < 0 || f->nargs == num_arguments); + + // TODO(1236192): Most runtime routines don't need the number of + // arguments passed in because it is constant. At some point we + // should remove this need and make the runtime routine entry code + // smarter. 
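+ // The two Prepare helpers below just load the argument count and the
+ // runtime function's ExternalReference into the registers the CEntry
+ // builtin reads them from (presumably a0 and a1 on this port) before
+ // calling the CEntry code object.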
+ PrepareCEntryArgs(num_arguments); + PrepareCEntryFunction(ExternalReference::Create(f)); + Handle code = + CodeFactory::CEntry(isolate(), f->result_size, save_doubles); + Call(code, RelocInfo::CODE_TARGET); +} + +void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { + ASM_CODE_COMMENT(this); + const Runtime::Function* function = Runtime::FunctionForId(fid); + DCHECK_EQ(1, function->result_size); + if (function->nargs >= 0) { + PrepareCEntryArgs(function->nargs); + } + JumpToExternalReference(ExternalReference::Create(fid)); +} + +void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, + BranchDelaySlot bd, + bool builtin_exit_frame) { + PrepareCEntryFunction(builtin); + Handle code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, + ArgvMode::kStack, builtin_exit_frame); + Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd); +} + +void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) { + li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); + Jump(kOffHeapTrampolineRegister); +} + +void MacroAssembler::LoadWeakValue(Register out, Register in, + Label* target_if_cleared) { + Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32)); + + And(out, in, Operand(~kWeakHeapObjectMask)); +} + +void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value, + Register scratch1, + Register scratch2) { + DCHECK_GT(value, 0); + if (FLAG_native_code_counters && counter->Enabled()) { + ASM_CODE_COMMENT(this); + li(scratch2, ExternalReference::Create(counter)); + lw(scratch1, MemOperand(scratch2)); + Addu(scratch1, scratch1, Operand(value)); + sw(scratch1, MemOperand(scratch2)); + } +} + +void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, + Register scratch1, + Register scratch2) { + DCHECK_GT(value, 0); + if (FLAG_native_code_counters && counter->Enabled()) { + ASM_CODE_COMMENT(this); + li(scratch2, ExternalReference::Create(counter)); + lw(scratch1, MemOperand(scratch2)); + Subu(scratch1, scratch1, Operand(value)); + sw(scratch1, MemOperand(scratch2)); + } +} + +// ----------------------------------------------------------------------------- +// Debugging. + +void TurboAssembler::Trap() { stop(); } +void TurboAssembler::DebugBreak() { stop(); } + +void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs, + Operand rt) { + if (FLAG_debug_code) Check(cc, reason, rs, rt); +} + +void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, + Operand rt) { + Label L; + Branch(&L, cc, rs, rt); + Abort(reason); + // Will not return here. + bind(&L); +} + +void TurboAssembler::Abort(AbortReason reason) { + Label abort_start; + bind(&abort_start); + if (FLAG_code_comments) { + const char* msg = GetAbortReason(reason); + RecordComment("Abort message: "); + RecordComment(msg); + } + + // Avoid emitting call to builtin if requested. + if (trap_on_abort()) { + stop(); + return; + } + + if (should_abort_hard()) { + // We don't care if we constructed a frame. Just pretend we did. + FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE); + PrepareCallCFunction(0, a0); + li(a0, Operand(static_cast(reason))); + CallCFunction(ExternalReference::abort_with_reason(), 1); + return; + } + + Move(a0, Smi::FromInt(static_cast(reason))); + + // Disable stub call restrictions to always allow calls to abort. 
+ if (!has_frame_) { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. + FrameScope scope(this, StackFrame::NO_FRAME_TYPE); + Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET); + } else { + Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET); + } + // Will not return here. + if (is_trampoline_pool_blocked()) { + // If the calling code cares about the exact number of + // instructions generated, we insert padding here to keep the size + // of the Abort macro constant. + // Currently in debug mode with debug_code enabled the number of + // generated instructions is 10, so we use this as a maximum value. + static const int kExpectedAbortInstructions = 10; + int abort_instructions = InstructionsGeneratedSince(&abort_start); + DCHECK_LE(abort_instructions, kExpectedAbortInstructions); + while (abort_instructions++ < kExpectedAbortInstructions) { + nop(); + } + } +} + +void TurboAssembler::LoadMap(Register destination, Register object) { + Lw(destination, FieldMemOperand(object, HeapObject::kMapOffset)); +} + +void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { + LoadMap(dst, cp); + Lw(dst, + FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset)); + Lw(dst, MemOperand(dst, Context::SlotOffset(index))); +} + +void TurboAssembler::StubPrologue(StackFrame::Type type) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(StackFrame::TypeToMarker(type))); + PushCommonFrame(scratch); +} + +void TurboAssembler::Prologue() { PushStandardFrame(a1); } + +void TurboAssembler::EnterFrame(StackFrame::Type type) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Push(ra, fp); + Move(fp, sp); + if (!StackFrame::IsJavaScript(type)) { + li(kScratchReg, Operand(StackFrame::TypeToMarker(type))); + Push(kScratchReg); + } +#if V8_ENABLE_WEBASSEMBLY + if (type == StackFrame::WASM) Push(kWasmInstanceRegister); +#endif // V8_ENABLE_WEBASSEMBLY +} + +void TurboAssembler::LeaveFrame(StackFrame::Type type) { + ASM_CODE_COMMENT(this); + addiu(sp, fp, 2 * kPointerSize); + lw(ra, MemOperand(fp, 1 * kPointerSize)); + lw(fp, MemOperand(fp, 0 * kPointerSize)); +} + +void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, + StackFrame::Type frame_type) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK(frame_type == StackFrame::EXIT || + frame_type == StackFrame::BUILTIN_EXIT); + + // Set up the frame structure on the stack. + STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement); + STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset); + STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset); + + // This is how the stack will look: + // fp + 2 (==kCallerSPDisplacement) - old stack's end + // [fp + 1 (==kCallerPCOffset)] - saved old ra + // [fp + 0 (==kCallerFPOffset)] - saved old fp + // [fp - 1 StackFrame::EXIT Smi + // [fp - 2 (==kSPOffset)] - sp of the called function + // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the + // new stack (will contain saved ra) + + // Save registers and reserve room for saved entry sp. 
+ addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp); + sw(ra, MemOperand(sp, 3 * kPointerSize)); + sw(fp, MemOperand(sp, 2 * kPointerSize)); + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(StackFrame::TypeToMarker(frame_type))); + sw(scratch, MemOperand(sp, 1 * kPointerSize)); + } + // Set up new frame pointer. + addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp); + + if (FLAG_debug_code) { + sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); + } + + // Save the frame pointer and the context in top. + li(t8, + ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate())); + sw(fp, MemOperand(t8)); + li(t8, + ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); + sw(cp, MemOperand(t8)); + + const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); + if (save_doubles) { + // The stack must be align to 0 modulo 8 for stores with sdc1. + DCHECK_EQ(kDoubleSize, frame_alignment); + if (frame_alignment > 0) { + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); + And(sp, sp, Operand(-frame_alignment)); // Align stack. + } + int space = FPURegister::kNumRegisters * kDoubleSize; + Subu(sp, sp, Operand(space)); + // Remember: we only need to save every 2nd double FPU value. + for (int i = 0; i < FPURegister::kNumRegisters; i += 2) { + FPURegister reg = FPURegister::from_code(i); + Sdc1(reg, MemOperand(sp, i * kDoubleSize)); + } + } + + // Reserve place for the return address, stack space and an optional slot + // (used by DirectCEntry to hold the return value if a struct is + // returned) and align the frame preparing for calling the runtime function. + DCHECK_GE(stack_space, 0); + Subu(sp, sp, Operand((stack_space + 2) * kPointerSize)); + if (frame_alignment > 0) { + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); + And(sp, sp, Operand(-frame_alignment)); // Align stack. + } + + // Set the exit frame sp value to point just before the return address + // location. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + addiu(scratch, sp, kPointerSize); + sw(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); +} + +void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, + bool do_return, + bool argument_count_is_length) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + // Optionally restore all double registers. + if (save_doubles) { + // Remember: we only need to restore every 2nd double FPU value. + lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset)); + for (int i = 0; i < FPURegister::kNumRegisters; i += 2) { + FPURegister reg = FPURegister::from_code(i); + Ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize)); + } + } + + // Clear top frame. + li(t8, + ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate())); + sw(zero_reg, MemOperand(t8)); + + // Restore current context from top and clear it in debug mode. + li(t8, + ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); + lw(cp, MemOperand(t8)); + +#ifdef DEBUG + li(t8, + ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); + sw(a3, MemOperand(t8)); +#endif + + // Pop the arguments, restore registers, and return. + mov(sp, fp); // Respect ABI stack constraint. 
+ lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); + lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); + + if (argument_count.is_valid()) { + if (argument_count_is_length) { + addu(sp, sp, argument_count); + } else { + Lsa(sp, sp, argument_count, kPointerSizeLog2, t8); + } + } + + if (do_return) { + Ret(USE_DELAY_SLOT); + // If returning, the instruction in the delay slot will be the addiu below. + } + addiu(sp, sp, 8); +} + +int TurboAssembler::ActivationFrameAlignment() { +#if V8_HOST_ARCH_MIPS + // Running on the real platform. Use the alignment as mandated by the local + // environment. + // Note: This will break if we ever start generating snapshots on one Mips + // platform for another Mips platform with a different alignment. + return base::OS::ActivationFrameAlignment(); +#else // V8_HOST_ARCH_MIPS + // If we are using the simulator then we should always align to the expected + // alignment. As the simulator is used to generate snapshots we do not know + // if the target platform will need alignment, so this is controlled from a + // flag. + return FLAG_sim_stack_alignment; +#endif // V8_HOST_ARCH_MIPS +} + +void MacroAssembler::AssertStackIsAligned() { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + const int frame_alignment = ActivationFrameAlignment(); + const int frame_alignment_mask = frame_alignment - 1; + + if (frame_alignment > kPointerSize) { + Label alignment_as_expected; + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + andi(scratch, sp, frame_alignment_mask); + Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); + // Don't use Check here, as it will call Runtime_Abort re-entering here. + stop(); + bind(&alignment_as_expected); + } + } +} + +void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, + BranchDelaySlot bd) { + DCHECK_EQ(0, kSmiTag); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + andi(scratch, value, kSmiTagMask); + Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); +} + +void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label, + BranchDelaySlot bd) { + DCHECK_EQ(0, kSmiTag); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + andi(scratch, value, kSmiTagMask); + Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg)); +} + +void MacroAssembler::AssertNotSmi(Register object) { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + STATIC_ASSERT(kSmiTag == 0); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + andi(scratch, object, kSmiTagMask); + Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); + } +} + +void MacroAssembler::AssertSmi(Register object) { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + STATIC_ASSERT(kSmiTag == 0); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + andi(scratch, object, kSmiTagMask); + Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); + } +} + +void MacroAssembler::AssertConstructor(Register object) { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + STATIC_ASSERT(kSmiTag == 0); + SmiTst(object, t8); + Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8, + Operand(zero_reg)); + + LoadMap(t8, object); + lbu(t8, FieldMemOperand(t8, Map::kBitFieldOffset)); + And(t8, t8, Operand(Map::Bits1::IsConstructorBit::kMask)); + Check(ne, 
AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg)); + } +} + +void MacroAssembler::AssertFunction(Register object) { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + STATIC_ASSERT(kSmiTag == 0); + SmiTst(object, t8); + Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8, + Operand(zero_reg)); + push(object); + LoadMap(object, object); + GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, t8); + Check(ls, AbortReason::kOperandIsNotAFunction, t8, + Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE)); + pop(object); + } +} + +void MacroAssembler::AssertCallableFunction(Register object) { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + STATIC_ASSERT(kSmiTag == 0); + SmiTst(object, t8); + Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8, + Operand(zero_reg)); + push(object); + LoadMap(object, object); + GetInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, t8); + Check(ls, AbortReason::kOperandIsNotACallableFunction, t8, + Operand(LAST_CALLABLE_JS_FUNCTION_TYPE - + FIRST_CALLABLE_JS_FUNCTION_TYPE)); + pop(object); + } +} + +void MacroAssembler::AssertBoundFunction(Register object) { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + STATIC_ASSERT(kSmiTag == 0); + SmiTst(object, t8); + Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8, + Operand(zero_reg)); + GetObjectType(object, t8, t8); + Check(eq, AbortReason::kOperandIsNotABoundFunction, t8, + Operand(JS_BOUND_FUNCTION_TYPE)); + } +} + +void MacroAssembler::AssertGeneratorObject(Register object) { + if (!FLAG_debug_code) return; + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + STATIC_ASSERT(kSmiTag == 0); + SmiTst(object, t8); + Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8, + Operand(zero_reg)); + + GetObjectType(object, t8, t8); + + Label done; + + // Check if JSGeneratorObject + Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE)); + + // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType) + Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE)); + + // Check if JSAsyncGeneratorObject + Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE)); + + Abort(AbortReason::kOperandIsNotAGeneratorObject); + + bind(&done); +} + +void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, + Register scratch) { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + Label done_checking; + AssertNotSmi(object); + LoadRoot(scratch, RootIndex::kUndefinedValue); + Branch(&done_checking, eq, object, Operand(scratch)); + GetObjectType(object, scratch, scratch); + Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch, + Operand(ALLOCATION_SITE_TYPE)); + bind(&done_checking); + } +} + +void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, + FPURegister src2, Label* out_of_line) { + ASM_CODE_COMMENT(this); + if (src1 == src2) { + Move_s(dst, src1); + return; + } + + // Check if one of operands is NaN. + CompareIsNanF32(src1, src2); + BranchTrueF(out_of_line); + + if (IsMipsArchVariant(kMips32r6)) { + max_s(dst, src1, src2); + } else { + Label return_left, return_right, done; + + CompareF32(OLT, src1, src2); + BranchTrueShortF(&return_right); + CompareF32(OLT, src2, src1); + BranchTrueShortF(&return_left); + + // Operands are equal, but check for +/-0. 
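+ // Here the operands compared equal and neither is NaN, so they are either
+ // the same value or a +0.0/-0.0 pair. mfc1 reads the raw bits of src1: an
+ // all-zero pattern means src1 is +0.0, the correct maximum, so the left
+ // operand is returned; any other pattern means src1 is -0.0 or the values
+ // are simply equal, and returning the right operand is correct either way.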
+ { + BlockTrampolinePoolScope block_trampoline_pool(this); + mfc1(t8, src1); + Branch(&return_left, eq, t8, Operand(zero_reg)); + Branch(&return_right); + } + + bind(&return_right); + if (src2 != dst) { + Move_s(dst, src2); + } + Branch(&done); + + bind(&return_left); + if (src1 != dst) { + Move_s(dst, src1); + } + + bind(&done); + } +} + +void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, + FPURegister src2) { + add_s(dst, src1, src2); +} + +void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, + FPURegister src2, Label* out_of_line) { + ASM_CODE_COMMENT(this); + if (src1 == src2) { + Move_s(dst, src1); + return; + } + + // Check if one of operands is NaN. + CompareIsNanF32(src1, src2); + BranchTrueF(out_of_line); + + if (IsMipsArchVariant(kMips32r6)) { + min_s(dst, src1, src2); + } else { + Label return_left, return_right, done; + + CompareF32(OLT, src1, src2); + BranchTrueShortF(&return_left); + CompareF32(OLT, src2, src1); + BranchTrueShortF(&return_right); + + // Left equals right => check for -0. + { + BlockTrampolinePoolScope block_trampoline_pool(this); + mfc1(t8, src1); + Branch(&return_right, eq, t8, Operand(zero_reg)); + Branch(&return_left); + } + + bind(&return_right); + if (src2 != dst) { + Move_s(dst, src2); + } + Branch(&done); + + bind(&return_left); + if (src1 != dst) { + Move_s(dst, src1); + } + + bind(&done); + } +} + +void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, + FPURegister src2) { + add_s(dst, src1, src2); +} + +void TurboAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1, + DoubleRegister src2, Label* out_of_line) { + ASM_CODE_COMMENT(this); + if (src1 == src2) { + Move_d(dst, src1); + return; + } + + // Check if one of operands is NaN. + CompareIsNanF64(src1, src2); + BranchTrueF(out_of_line); + + if (IsMipsArchVariant(kMips32r6)) { + max_d(dst, src1, src2); + } else { + Label return_left, return_right, done; + + CompareF64(OLT, src1, src2); + BranchTrueShortF(&return_right); + CompareF64(OLT, src2, src1); + BranchTrueShortF(&return_left); + + // Left equals right => check for -0. + { + BlockTrampolinePoolScope block_trampoline_pool(this); + Mfhc1(t8, src1); + Branch(&return_left, eq, t8, Operand(zero_reg)); + Branch(&return_right); + } + + bind(&return_right); + if (src2 != dst) { + Move_d(dst, src2); + } + Branch(&done); + + bind(&return_left); + if (src1 != dst) { + Move_d(dst, src1); + } + + bind(&done); + } +} + +void TurboAssembler::Float64MaxOutOfLine(DoubleRegister dst, + DoubleRegister src1, + DoubleRegister src2) { + add_d(dst, src1, src2); +} + +void TurboAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1, + DoubleRegister src2, Label* out_of_line) { + ASM_CODE_COMMENT(this); + if (src1 == src2) { + Move_d(dst, src1); + return; + } + + // Check if one of operands is NaN. + CompareIsNanF64(src1, src2); + BranchTrueF(out_of_line); + + if (IsMipsArchVariant(kMips32r6)) { + min_d(dst, src1, src2); + } else { + Label return_left, return_right, done; + + CompareF64(OLT, src1, src2); + BranchTrueShortF(&return_left); + CompareF64(OLT, src2, src1); + BranchTrueShortF(&return_right); + + // Left equals right => check for -0. 
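+ // Mirror image of the Max case above: Mfhc1 reads the high word of src1,
+ // which is zero only for +0.0. For Min the right operand is then preferred
+ // because it may be -0.0; otherwise src1 is -0.0 or the values are simply
+ // equal, and returning the left operand is correct.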
+ { + BlockTrampolinePoolScope block_trampoline_pool(this); + Mfhc1(t8, src1); + Branch(&return_right, eq, t8, Operand(zero_reg)); + Branch(&return_left); + } + + bind(&return_right); + if (src2 != dst) { + Move_d(dst, src2); + } + Branch(&done); + + bind(&return_left); + if (src1 != dst) { + Move_d(dst, src1); + } + + bind(&done); + } +} + +void TurboAssembler::Float64MinOutOfLine(DoubleRegister dst, + DoubleRegister src1, + DoubleRegister src2) { + add_d(dst, src1, src2); +} + +static const int kRegisterPassedArguments = 4; + +int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments) { + int stack_passed_words = 0; + num_reg_arguments += 2 * num_double_arguments; + + // Up to four simple arguments are passed in registers a0..a3. + if (num_reg_arguments > kRegisterPassedArguments) { + stack_passed_words += num_reg_arguments - kRegisterPassedArguments; + } + stack_passed_words += kCArgSlotCount; + return stack_passed_words; +} + +void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, + int num_double_arguments, + Register scratch) { + ASM_CODE_COMMENT(this); + int frame_alignment = ActivationFrameAlignment(); + + // Up to four simple arguments are passed in registers a0..a3. + // Those four arguments must have reserved argument slots on the stack for + // mips, even though those argument slots are not normally used. + // Remaining arguments are pushed on the stack, above (higher address than) + // the argument slots. + int stack_passed_arguments = + CalculateStackPassedWords(num_reg_arguments, num_double_arguments); + if (frame_alignment > kPointerSize) { + // Make stack end at alignment and make room for num_arguments - 4 words + // and the original value of sp. + mov(scratch, sp); + Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); + And(sp, sp, Operand(-frame_alignment)); + sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); + } else { + Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); + } +} + +void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, + Register scratch) { + PrepareCallCFunction(num_reg_arguments, 0, scratch); +} + +void TurboAssembler::CallCFunction(ExternalReference function, + int num_reg_arguments, + int num_double_arguments) { + ASM_CODE_COMMENT(this); + // Linux/MIPS convention demands that register t9 contains + // the address of the function being call in case of + // Position independent code + BlockTrampolinePoolScope block_trampoline_pool(this); + li(t9, function); + CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments); +} + +void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, + int num_double_arguments) { + ASM_CODE_COMMENT(this); + CallCFunctionHelper(function, 0, num_reg_arguments, num_double_arguments); +} + +void TurboAssembler::CallCFunction(ExternalReference function, + int num_arguments) { + CallCFunction(function, num_arguments, 0); +} + +void TurboAssembler::CallCFunction(Register function, int num_arguments) { + CallCFunction(function, num_arguments, 0); +} + +void TurboAssembler::CallCFunctionHelper(Register function_base, + int16_t function_offset, + int num_reg_arguments, + int num_double_arguments) { + DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); + DCHECK(has_frame()); + // Make sure that the stack is aligned before calling a C function unless + // running in the simulator. 
The simulator has its own alignment check which + // provides more information. + // The argument stots are presumed to have been set up by + // PrepareCallCFunction. The C function must be called via t9, for mips ABI. + +#if V8_HOST_ARCH_MIPS + if (FLAG_debug_code) { + int frame_alignment = base::OS::ActivationFrameAlignment(); + int frame_alignment_mask = frame_alignment - 1; + if (frame_alignment > kPointerSize) { + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); + Label alignment_as_expected; + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + And(scratch, sp, Operand(frame_alignment_mask)); + Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); + // Don't use Check here, as it will call Runtime_Abort possibly + // re-entering here. + stop(); + bind(&alignment_as_expected); + } + } +#endif // V8_HOST_ARCH_MIPS + + // Just call directly. The function called cannot cause a GC, or + // allow preemption, so the return address in the link register + // stays correct. + + { + BlockTrampolinePoolScope block_trampoline_pool(this); + if (function_base != t9) { + mov(t9, function_base); + function_base = t9; + } + + if (function_offset != 0) { + addiu(t9, t9, function_offset); + function_offset = 0; + } + + // Save the frame pointer and PC so that the stack layout remains iterable, + // even without an ExitFrame which normally exists between JS and C frames. + // 't' registers are caller-saved so this is safe as a scratch register. + Register pc_scratch = t4; + Register scratch = t5; + DCHECK(!AreAliased(pc_scratch, scratch, function_base)); + + mov(scratch, ra); + nal(); + mov(pc_scratch, ra); + mov(ra, scratch); + + // See x64 code for reasoning about how to address the isolate data fields. + if (root_array_available()) { + sw(pc_scratch, MemOperand(kRootRegister, + IsolateData::fast_c_call_caller_pc_offset())); + sw(fp, MemOperand(kRootRegister, + IsolateData::fast_c_call_caller_fp_offset())); + } else { + DCHECK_NOT_NULL(isolate()); + li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate())); + sw(pc_scratch, MemOperand(scratch)); + li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); + sw(fp, MemOperand(scratch)); + } + + Call(function_base, function_offset); + + // We don't unset the PC; the FP is the source of truth. 
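+ // Only the saved frame pointer is cleared below; a zero
+ // fast_c_call_caller_fp is what tells the stack iterator that no fast C
+ // call is in progress any more.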
+ if (root_array_available()) { + sw(zero_reg, MemOperand(kRootRegister, + IsolateData::fast_c_call_caller_fp_offset())); + } else { + DCHECK_NOT_NULL(isolate()); + li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); + sw(zero_reg, MemOperand(scratch)); + } + + int stack_passed_arguments = + CalculateStackPassedWords(num_reg_arguments, num_double_arguments); + + if (base::OS::ActivationFrameAlignment() > kPointerSize) { + lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); + } else { + Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); + } + + set_pc_for_safepoint(); + } +} + +#undef BRANCH_ARGS_CHECK + +void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, + Condition cc, Label* condition_met) { + ASM_CODE_COMMENT(this); + And(scratch, object, Operand(~kPageAlignmentMask)); + lw(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); + And(scratch, scratch, Operand(mask)); + Branch(condition_met, cc, scratch, Operand(zero_reg)); +} + +Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, + Register reg4, Register reg5, + Register reg6) { + RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6}; + + const RegisterConfiguration* config = RegisterConfiguration::Default(); + for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { + int code = config->GetAllocatableGeneralCode(i); + Register candidate = Register::from_code(code); + if (regs.has(candidate)) continue; + return candidate; + } + UNREACHABLE(); +} + +void TurboAssembler::ComputeCodeStartAddress(Register dst) { + // This push on ra and the pop below together ensure that we restore the + // register ra, which is needed while computing the code start address. + push(ra); + + // The nal instruction puts the address of the current instruction into + // the return address (ra) register, which we can use later on. + if (IsMipsArchVariant(kMips32r6)) { + addiupc(ra, 1); + } else { + nal(); + nop(); + } + int pc = pc_offset(); + li(dst, pc); + subu(dst, ra, dst); + + pop(ra); // Restore ra +} + +void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, + DeoptimizeKind kind, Label* ret, + Label*) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Lw(t9, + MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target))); + Call(t9); + DCHECK_EQ(SizeOfCodeGeneratedSince(exit), + (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize + : Deoptimizer::kEagerDeoptExitSize); +} + +void TurboAssembler::LoadCodeObjectEntry(Register destination, + Register code_object) { + ASM_CODE_COMMENT(this); + // Code objects are called differently depending on whether we are generating + // builtin code (which will later be embedded into the binary) or compiling + // user JS code at runtime. + // * Builtin code runs in --jitless mode and thus must not call into on-heap + // Code targets. Instead, we dispatch through the builtins entry table. + // * Codegen at runtime does not have this restriction and we can use the + // shorter, branchless instruction sequence. The assumption here is that + // targets are usually generated code and not builtin Code objects. + if (options().isolate_independent_code) { + DCHECK(root_array_available()); + Label if_code_is_off_heap, out; + + Register scratch = kScratchReg; + DCHECK(!AreAliased(destination, scratch)); + DCHECK(!AreAliased(code_object, scratch)); + + // Check whether the Code object is an off-heap trampoline. 
If so, call its + // (off-heap) entry point directly without going through the (on-heap) + // trampoline. Otherwise, just call the Code object as always. + Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset)); + And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask)); + Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg)); + + // Not an off-heap trampoline object, the entry point is at + // Code::raw_instruction_start(). + Addu(destination, code_object, Code::kHeaderSize - kHeapObjectTag); + Branch(&out); + + // An off-heap trampoline, the entry point is loaded from the builtin entry + // table. + bind(&if_code_is_off_heap); + Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset)); + Lsa(destination, kRootRegister, scratch, kSystemPointerSizeLog2); + Lw(destination, + MemOperand(destination, IsolateData::builtin_entry_table_offset())); + + bind(&out); + } else { + Addu(destination, code_object, Code::kHeaderSize - kHeapObjectTag); + } +} + +void TurboAssembler::CallCodeObject(Register code_object) { + ASM_CODE_COMMENT(this); + LoadCodeObjectEntry(code_object, code_object); + Call(code_object); +} +void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { + ASM_CODE_COMMENT(this); + DCHECK_EQ(JumpMode::kJump, jump_mode); + LoadCodeObjectEntry(code_object, code_object); + Jump(code_object); +} + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_MIPS diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h new file mode 100644 index 00000000000000..dc31b6e1b8adbc --- /dev/null +++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h @@ -0,0 +1,1202 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H +#error This header must be included via macro-assembler.h +#endif + +#ifndef V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_ +#define V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_ + +#include "src/codegen/assembler.h" +#include "src/codegen/mips/assembler-mips.h" +#include "src/common/globals.h" +#include "src/objects/contexts.h" +#include "src/objects/tagged-index.h" + +namespace v8 { +namespace internal { + +// Forward declarations +enum class AbortReason : uint8_t; + +// Reserved Register Usage Summary. +// +// Registers t8, t9, and at are reserved for use by the MacroAssembler. +// +// The programmer should know that the MacroAssembler may clobber these three, +// but won't touch other registers except in special cases. +// +// Per the MIPS ABI, register t9 must be used for indirect function call +// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when +// trying to update gp register for position-independent-code. Whenever +// MIPS generated code calls C code, it must be via t9 register. + +// Flags used for LeaveExitFrame function. +enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false }; + +// Flags used for the li macro-assembler function. +enum LiFlags { + // If the constant value can be represented in just 16 bits, then + // optimize the li to use a single instruction, rather than lui/ori pair. + OPTIMIZE_SIZE = 0, + // Always use 2 instructions (lui/ori pair), even if the constant could + // be loaded with just one, so that this value is patchable later. 
+ CONSTANT_SIZE = 1 +}; + +enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; + +Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg, + Register reg3 = no_reg, + Register reg4 = no_reg, + Register reg5 = no_reg, + Register reg6 = no_reg); + +// ----------------------------------------------------------------------------- +// Static helper functions. +// Generate a MemOperand for loading a field from an object. +inline MemOperand FieldMemOperand(Register object, int offset) { + return MemOperand(object, offset - kHeapObjectTag); +} + +// Generate a MemOperand for storing arguments 5..N on the stack +// when calling CallCFunction(). +inline MemOperand CFunctionArgumentOperand(int index) { + DCHECK_GT(index, kCArgSlotCount); + // Argument 5 takes the slot just past the four Arg-slots. + int offset = (index - 5) * kPointerSize + kCArgsSlotsSize; + return MemOperand(sp, offset); +} + +class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { + public: + using TurboAssemblerBase::TurboAssemblerBase; + + // Activation support. + void EnterFrame(StackFrame::Type type); + void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { + // Out-of-line constant pool not implemented on mips. + UNREACHABLE(); + } + void LeaveFrame(StackFrame::Type type); + + void AllocateStackSpace(Register bytes) { Subu(sp, sp, bytes); } + void AllocateStackSpace(int bytes) { + DCHECK_GE(bytes, 0); + if (bytes == 0) return; + Subu(sp, sp, Operand(bytes)); + } + + // Generates function and stub prologue code. + void StubPrologue(StackFrame::Type type); + void Prologue(); + + void InitializeRootRegister() { + ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); + li(kRootRegister, Operand(isolate_root)); + } + + // Jump unconditionally to given label. + // We NEED a nop in the branch delay slot, as it used by v8, for example in + // CodeGenerator::ProcessDeferred(). + // Currently the branch delay slot is filled by the MacroAssembler. + // Use rather b(Label) for code generation. + void jmp(Label* L) { Branch(L); } + + // ------------------------------------------------------------------------- + // Debugging. + + void Trap(); + void DebugBreak(); + + // Calls Abort(msg) if the condition cc is not satisfied. + // Use --debug_code to enable. + void Assert(Condition cc, AbortReason reason, Register rs, Operand rt); + + // Like Assert(), but always enabled. + void Check(Condition cc, AbortReason reason, Register rs, Operand rt); + + // Print a message to stdout and abort execution. + void Abort(AbortReason msg); + + // Arguments macros. +#define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2 +#define COND_ARGS cond, r1, r2 + + // Cases when relocation is not needed. 
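  // (Editorial note: illustrative only, not part of the upstream file.)
  // For reference, the DECLARE_BRANCH_PROTOTYPES machinery defined below
  // expands, for Name == Branch and target_type == Label*, to overloads like:
  //   void Branch(Label* target, BranchDelaySlot bd = PROTECT);
  //   void Branch(Label* target, Condition cond, Register r1,
  //               const Operand& r2, BranchDelaySlot bd = PROTECT);
  // plus the same shapes taking an int32_t offset instead of a Label*.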
+#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \ + void Name(target_type target, BranchDelaySlot bd = PROTECT); \ + inline void Name(BranchDelaySlot bd, target_type target) { \ + Name(target, bd); \ + } \ + void Name(target_type target, COND_TYPED_ARGS, \ + BranchDelaySlot bd = PROTECT); \ + inline void Name(BranchDelaySlot bd, target_type target, COND_TYPED_ARGS) { \ + Name(target, COND_ARGS, bd); \ + } + +#define DECLARE_BRANCH_PROTOTYPES(Name) \ + DECLARE_NORELOC_PROTOTYPE(Name, Label*) \ + DECLARE_NORELOC_PROTOTYPE(Name, int32_t) + + DECLARE_BRANCH_PROTOTYPES(Branch) + DECLARE_BRANCH_PROTOTYPES(BranchAndLink) + DECLARE_BRANCH_PROTOTYPES(BranchShort) + +#undef DECLARE_BRANCH_PROTOTYPES +#undef COND_TYPED_ARGS +#undef COND_ARGS + + // Floating point branches + void CompareF32(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) { + CompareF(S, cc, cmp1, cmp2); + } + + void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2) { + CompareIsNanF(S, cmp1, cmp2); + } + + void CompareF64(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) { + CompareF(D, cc, cmp1, cmp2); + } + + void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2) { + CompareIsNanF(D, cmp1, cmp2); + } + + void BranchTrueShortF(Label* target, BranchDelaySlot bd = PROTECT); + void BranchFalseShortF(Label* target, BranchDelaySlot bd = PROTECT); + + void BranchTrueF(Label* target, BranchDelaySlot bd = PROTECT); + void BranchFalseF(Label* target, BranchDelaySlot bd = PROTECT); + + // MSA Branches + void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond, + MSARegister wt, BranchDelaySlot bd = PROTECT); + + void BranchLong(int32_t offset, BranchDelaySlot bdslot = PROTECT); + void Branch(Label* L, Condition cond, Register rs, RootIndex index, + BranchDelaySlot bdslot = PROTECT); + + // Load int32 in the rd register. + void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE); + inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) { + li(rd, Operand(j), mode); + } + void li(Register dst, Handle value, LiFlags mode = OPTIMIZE_SIZE); + void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE); + void li(Register dst, const StringConstantBase* string, + LiFlags mode = OPTIMIZE_SIZE); + + void LoadFromConstantsTable(Register destination, int constant_index) final; + void LoadRootRegisterOffset(Register destination, intptr_t offset) final; + void LoadRootRelative(Register destination, int32_t offset) final; + + inline void Move(Register output, MemOperand operand) { Lw(output, operand); } + +// Jump, Call, and Ret pseudo instructions implementing inter-working. +#define COND_ARGS \ + Condition cond = al, Register rs = zero_reg, \ + const Operand &rt = Operand(zero_reg), \ + BranchDelaySlot bd = PROTECT + + void Jump(Register target, int16_t offset = 0, COND_ARGS); + void Jump(Register target, Register base, int16_t offset = 0, COND_ARGS); + void Jump(Register target, const Operand& offset, COND_ARGS); + void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS); + void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS); + // Deffer from li, this method save target to the memory, and then load + // it to register use lw, it can be used in wasm jump table for concurrent + // patching. 
+ void PatchAndJump(Address target); + void Jump(Handle code, RelocInfo::Mode rmode, COND_ARGS); + void Jump(const ExternalReference& reference); + void Call(Register target, int16_t offset = 0, COND_ARGS); + void Call(Register target, Register base, int16_t offset = 0, COND_ARGS); + void Call(Address target, RelocInfo::Mode rmode, COND_ARGS); + void Call(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, + COND_ARGS); + void Call(Label* target); + void LoadAddress(Register dst, Label* target); + + // Load the builtin given by the Smi in |builtin| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin); + void LoadEntryFromBuiltin(Builtin builtin, Register destination); + MemOperand EntryFromBuiltinAsOperand(Builtin builtin); + + void CallBuiltinByIndex(Register builtin_index); + void CallBuiltin(Builtin builtin); + + void LoadCodeObjectEntry(Register destination, Register code_object); + void CallCodeObject(Register code_object); + + void JumpCodeObject(Register code_object, + JumpMode jump_mode = JumpMode::kJump); + + // Generates an instruction sequence s.t. the return address points to the + // instruction following the call. + // The return address on the stack is used by frame iteration. + void StoreReturnAddressAndCall(Register target); + + void CallForDeoptimization(Builtin target, int deopt_id, Label* exit, + DeoptimizeKind kind, Label* ret, + Label* jump_deoptimization_entry_label); + + void Ret(COND_ARGS); + inline void Ret(BranchDelaySlot bd, Condition cond = al, + Register rs = zero_reg, + const Operand& rt = Operand(zero_reg)) { + Ret(cond, rs, rt, bd); + } + + // Emit code to discard a non-negative number of pointer-sized elements + // from the stack, clobbering only the sp register. + void Drop(int count, Condition cond = cc_always, Register reg = no_reg, + const Operand& op = Operand(no_reg)); + + // We assume the size of the arguments is the pointer size. + // An optional mode argument is passed, which can indicate we need to + // explicitly add the receiver to the count. + enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver }; + enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes }; + void DropArguments(Register count, ArgumentsCountType type, + ArgumentsCountMode mode); + void DropArgumentsAndPushNewReceiver(Register argc, Register receiver, + ArgumentsCountType type, + ArgumentsCountMode mode); + + // Trivial case of DropAndRet that utilizes the delay slot. + void DropAndRet(int drop); + + void DropAndRet(int drop, Condition cond, Register reg, const Operand& op); + + void Lw(Register rd, const MemOperand& rs); + void Sw(Register rd, const MemOperand& rs); + + void push(Register src) { + Addu(sp, sp, Operand(-kPointerSize)); + sw(src, MemOperand(sp, 0)); + } + + void Push(Register src) { push(src); } + void Push(Handle handle); + void Push(Smi smi); + + // Push two registers. Pushes leftmost register first (to highest address). + void Push(Register src1, Register src2) { + Subu(sp, sp, Operand(2 * kPointerSize)); + sw(src1, MemOperand(sp, 1 * kPointerSize)); + sw(src2, MemOperand(sp, 0 * kPointerSize)); + } + + // Push three registers. Pushes leftmost register first (to highest address). + void Push(Register src1, Register src2, Register src3) { + Subu(sp, sp, Operand(3 * kPointerSize)); + sw(src1, MemOperand(sp, 2 * kPointerSize)); + sw(src2, MemOperand(sp, 1 * kPointerSize)); + sw(src3, MemOperand(sp, 0 * kPointerSize)); + } + + // Push four registers. 
Pushes leftmost register first (to highest address). + void Push(Register src1, Register src2, Register src3, Register src4) { + Subu(sp, sp, Operand(4 * kPointerSize)); + sw(src1, MemOperand(sp, 3 * kPointerSize)); + sw(src2, MemOperand(sp, 2 * kPointerSize)); + sw(src3, MemOperand(sp, 1 * kPointerSize)); + sw(src4, MemOperand(sp, 0 * kPointerSize)); + } + + // Push five registers. Pushes leftmost register first (to highest address). + void Push(Register src1, Register src2, Register src3, Register src4, + Register src5) { + Subu(sp, sp, Operand(5 * kPointerSize)); + sw(src1, MemOperand(sp, 4 * kPointerSize)); + sw(src2, MemOperand(sp, 3 * kPointerSize)); + sw(src3, MemOperand(sp, 2 * kPointerSize)); + sw(src4, MemOperand(sp, 1 * kPointerSize)); + sw(src5, MemOperand(sp, 0 * kPointerSize)); + } + + void Push(Register src, Condition cond, Register tst1, Register tst2) { + // Since we don't have conditional execution we use a Branch. + Branch(3, cond, tst1, Operand(tst2)); + Subu(sp, sp, Operand(kPointerSize)); + sw(src, MemOperand(sp, 0)); + } + + enum PushArrayOrder { kNormal, kReverse }; + void PushArray(Register array, Register size, Register scratch, + Register scratch2, PushArrayOrder order = kNormal); + + void MaybeSaveRegisters(RegList registers); + void MaybeRestoreRegisters(RegList registers); + + void CallEphemeronKeyBarrier(Register object, Register slot_address, + SaveFPRegsMode fp_mode); + + void CallRecordWriteStubSaveRegisters( + Register object, Register slot_address, + RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, + StubCallMode mode = StubCallMode::kCallBuiltinPointer); + void CallRecordWriteStub( + Register object, Register slot_address, + RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, + StubCallMode mode = StubCallMode::kCallBuiltinPointer); + + // Push multiple registers on the stack. + // Registers are saved in numerical order, with higher numbered registers + // saved in higher memory addresses. + void MultiPush(RegList regs); + void MultiPushFPU(DoubleRegList regs); + + // Calculate how much stack space (in bytes) are required to store caller + // registers excluding those specified in the arguments. + int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, + Register exclusion1 = no_reg, + Register exclusion2 = no_reg, + Register exclusion3 = no_reg) const; + + // Push caller saved registers on the stack, and return the number of bytes + // stack pointer is adjusted. + int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg, + Register exclusion2 = no_reg, + Register exclusion3 = no_reg); + // Restore caller saved registers from the stack, and return the number of + // bytes stack pointer is adjusted. + int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg, + Register exclusion2 = no_reg, + Register exclusion3 = no_reg); + + void pop(Register dst) { + lw(dst, MemOperand(sp, 0)); + Addu(sp, sp, Operand(kPointerSize)); + } + + void Pop(Register dst) { pop(dst); } + + // Pop two registers. Pops rightmost register first (from lower address). + void Pop(Register src1, Register src2) { + DCHECK(src1 != src2); + lw(src2, MemOperand(sp, 0 * kPointerSize)); + lw(src1, MemOperand(sp, 1 * kPointerSize)); + Addu(sp, sp, 2 * kPointerSize); + } + + // Pop three registers. Pops rightmost register first (from lower address). 
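  // (Editorial note: illustrative only, not part of the upstream file.)
  // The multi-register Push/Pop helpers are ordered so that they compose as
  // inverses; for example:
  //   Push(a0, a1, a2);  // a0 stored highest, a2 stored at the new sp
  //   ...
  //   Pop(a0, a1, a2);   // restores all three and rebalances sp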
+ void Pop(Register src1, Register src2, Register src3) { + lw(src3, MemOperand(sp, 0 * kPointerSize)); + lw(src2, MemOperand(sp, 1 * kPointerSize)); + lw(src1, MemOperand(sp, 2 * kPointerSize)); + Addu(sp, sp, 3 * kPointerSize); + } + + void Pop(uint32_t count = 1) { Addu(sp, sp, Operand(count * kPointerSize)); } + + // Pops multiple values from the stack and load them in the + // registers specified in regs. Pop order is the opposite as in MultiPush. + void MultiPop(RegList regs); + void MultiPopFPU(DoubleRegList regs); + + // Load Scaled Address instructions. Parameter sa (shift argument) must be + // between [1, 31] (inclusive). On pre-r6 architectures the scratch register + // may be clobbered. + void Lsa(Register rd, Register rs, Register rt, uint8_t sa, + Register scratch = at); + +#define DEFINE_INSTRUCTION(instr) \ + void instr(Register rd, Register rs, const Operand& rt); \ + void instr(Register rd, Register rs, Register rt) { \ + instr(rd, rs, Operand(rt)); \ + } \ + void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); } + +#define DEFINE_INSTRUCTION2(instr) \ + void instr(Register rs, const Operand& rt); \ + void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \ + void instr(Register rs, int32_t j) { instr(rs, Operand(j)); } + +#define DEFINE_INSTRUCTION3(instr) \ + void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \ + void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \ + instr(rd_hi, rd_lo, rs, Operand(rt)); \ + } \ + void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \ + instr(rd_hi, rd_lo, rs, Operand(j)); \ + } + + DEFINE_INSTRUCTION(Addu) + DEFINE_INSTRUCTION(Subu) + DEFINE_INSTRUCTION(Mul) + DEFINE_INSTRUCTION(Div) + DEFINE_INSTRUCTION(Divu) + DEFINE_INSTRUCTION(Mod) + DEFINE_INSTRUCTION(Modu) + DEFINE_INSTRUCTION(Mulh) + DEFINE_INSTRUCTION2(Mult) + DEFINE_INSTRUCTION(Mulhu) + DEFINE_INSTRUCTION2(Multu) + DEFINE_INSTRUCTION2(Div) + DEFINE_INSTRUCTION2(Divu) + + DEFINE_INSTRUCTION3(Div) + DEFINE_INSTRUCTION3(Mul) + DEFINE_INSTRUCTION3(Mulu) + + DEFINE_INSTRUCTION(And) + DEFINE_INSTRUCTION(Or) + DEFINE_INSTRUCTION(Xor) + DEFINE_INSTRUCTION(Nor) + DEFINE_INSTRUCTION2(Neg) + + DEFINE_INSTRUCTION(Slt) + DEFINE_INSTRUCTION(Sltu) + DEFINE_INSTRUCTION(Sle) + DEFINE_INSTRUCTION(Sleu) + DEFINE_INSTRUCTION(Sgt) + DEFINE_INSTRUCTION(Sgtu) + DEFINE_INSTRUCTION(Sge) + DEFINE_INSTRUCTION(Sgeu) + + // MIPS32 R2 instruction macro. + DEFINE_INSTRUCTION(Ror) + +#undef DEFINE_INSTRUCTION +#undef DEFINE_INSTRUCTION2 +#undef DEFINE_INSTRUCTION3 + + void SmiUntag(Register reg) { sra(reg, reg, kSmiTagSize); } + + void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); } + + void SmiToInt32(Register smi) { SmiUntag(smi); } + + int CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments); + + // Before calling a C-function from generated code, align arguments on stack + // and add space for the four mips argument slots. + // After aligning the frame, non-register arguments must be stored on the + // stack, after the argument-slots using helper: CFunctionArgumentOperand(). + // The argument count assumes all arguments are word sized. + // Some compilers/platforms require the stack to be aligned when calling + // C++ code. + // Needs a scratch register to do some arithmetic. This register will be + // trashed. 
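  // (Editorial note: illustrative only, not part of the upstream file.)
  // Worked example of the stack-word computation used by these helpers on
  // O32: for 2 integer and 2 double arguments, CalculateStackPassedWords
  // counts 2 + 2 * 2 = 6 register-sized words, of which 6 - 4 = 2 spill to
  // the stack on top of the 4 reserved argument slots (kCArgSlotCount), so
  // 6 words are reserved before the call (more if extra frame alignment is
  // required).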
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, + Register scratch); + void PrepareCallCFunction(int num_reg_arguments, Register scratch); + + // Arguments 1-4 are placed in registers a0 through a3 respectively. + // Arguments 5..n are stored to stack using following: + // sw(t0, CFunctionArgumentOperand(5)); + + // Calls a C function and cleans up the space for arguments allocated + // by PrepareCallCFunction. The called function is not allowed to trigger a + // garbage collection, since that might move the code and invalidate the + // return address (unless this is somehow accounted for by the called + // function). + void CallCFunction(ExternalReference function, int num_arguments); + void CallCFunction(Register function, int num_arguments); + void CallCFunction(ExternalReference function, int num_reg_arguments, + int num_double_arguments); + void CallCFunction(Register function, int num_reg_arguments, + int num_double_arguments); + void MovFromFloatResult(DoubleRegister dst); + void MovFromFloatParameter(DoubleRegister dst); + + // There are two ways of passing double arguments on MIPS, depending on + // whether soft or hard floating point ABI is used. These functions + // abstract parameter passing for the three different ways we call + // C functions from generated code. + void MovToFloatParameter(DoubleRegister src); + void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2); + void MovToFloatResult(DoubleRegister src); + + // See comments at the beginning of Builtins::Generate_CEntry. + inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); } + inline void PrepareCEntryFunction(const ExternalReference& ref) { + li(a1, ref); + } + + void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, + Label* condition_met); +#undef COND_ARGS + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. + // Exits with 'result' holding the answer. + void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, + DoubleRegister double_input, StubCallMode stub_mode); + + // Conditional move. 
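  // (Editorial note: illustrative only, not part of the upstream file.)
  // Semantics of the conditional moves declared below, following the MIPS
  // movz/movn/movt/movf instructions:
  //   Movz(rd, rs, rt):  rd = rs  if rt == 0
  //   Movn(rd, rs, rt):  rd = rs  if rt != 0
  //   Movt(rd, rs, cc):  rd = rs  if FPU condition flag cc is true
  //   Movf(rd, rs, cc):  rd = rs  if FPU condition flag cc is false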
+ void Movz(Register rd, Register rs, Register rt); + void Movn(Register rd, Register rs, Register rt); + void Movt(Register rd, Register rs, uint16_t cc = 0); + void Movf(Register rd, Register rs, uint16_t cc = 0); + + void LoadZeroIfFPUCondition(Register dest); + void LoadZeroIfNotFPUCondition(Register dest); + + void LoadZeroIfConditionNotZero(Register dest, Register condition); + void LoadZeroIfConditionZero(Register dest, Register condition); + void LoadZeroOnCondition(Register rd, Register rs, const Operand& rt, + Condition cond); + + void Clz(Register rd, Register rs); + void Ctz(Register rd, Register rs); + void Popcnt(Register rd, Register rs); + + // Int64Lowering instructions + void AddPair(Register dst_low, Register dst_high, Register left_low, + Register left_high, Register right_low, Register right_high, + Register scratch1, Register scratch2); + + void AddPair(Register dst_low, Register dst_high, Register left_low, + Register left_high, int32_t imm, Register scratch1, + Register scratch2); + + void SubPair(Register dst_low, Register dst_high, Register left_low, + Register left_high, Register right_low, Register right_high, + Register scratch1, Register scratch2); + + void AndPair(Register dst_low, Register dst_high, Register left_low, + Register left_high, Register right_low, Register right_high); + + void OrPair(Register dst_low, Register dst_high, Register left_low, + Register left_high, Register right_low, Register right_high); + + void XorPair(Register dst_low, Register dst_high, Register left_low, + Register left_high, Register right_low, Register right_high); + + void MulPair(Register dst_low, Register dst_high, Register left_low, + Register left_high, Register right_low, Register right_high, + Register scratch1, Register scratch2); + + void ShlPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, Register shift, Register scratch1, + Register scratch2); + + void ShlPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, uint32_t shift, Register scratch); + + void ShrPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, Register shift, Register scratch1, + Register scratch2); + + void ShrPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, uint32_t shift, Register scratch); + + void SarPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, Register shift, Register scratch1, + Register scratch2); + + void SarPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, uint32_t shift, Register scratch); + + // MIPS32 R2 instruction macro. + void Ins(Register rt, Register rs, uint16_t pos, uint16_t size); + void Ext(Register rt, Register rs, uint16_t pos, uint16_t size); + void ExtractBits(Register dest, Register source, Register pos, int size, + bool sign_extend = false); + void InsertBits(Register dest, Register source, Register pos, int size); + + void Seb(Register rd, Register rt); + void Seh(Register rd, Register rt); + void Neg_s(FPURegister fd, FPURegister fs); + void Neg_d(FPURegister fd, FPURegister fs); + + // MIPS32 R6 instruction macros. + void Bovc(Register rt, Register rs, Label* L); + void Bnvc(Register rt, Register rs, Label* L); + + // Convert single to unsigned word. 
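  // (Editorial note: illustrative only, not part of the upstream file.)
  // The *Pair helpers above implement 64-bit operations on 32-bit register
  // pairs for Int64Lowering; e.g. AddPair conceptually computes
  //   {dst_high:dst_low} = {left_high:left_low} + {right_high:right_low}
  // propagating the carry from the low word into the high word via the
  // scratch registers.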
+ void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch); + void Trunc_uw_s(Register rd, FPURegister fs, FPURegister scratch); + + void Trunc_w_d(FPURegister fd, FPURegister fs); + void Round_w_d(FPURegister fd, FPURegister fs); + void Floor_w_d(FPURegister fd, FPURegister fs); + void Ceil_w_d(FPURegister fd, FPURegister fs); + + // Round double functions + void Trunc_d_d(FPURegister fd, FPURegister fs); + void Round_d_d(FPURegister fd, FPURegister fs); + void Floor_d_d(FPURegister fd, FPURegister fs); + void Ceil_d_d(FPURegister fd, FPURegister fs); + + // Round float functions + void Trunc_s_s(FPURegister fd, FPURegister fs); + void Round_s_s(FPURegister fd, FPURegister fs); + void Floor_s_s(FPURegister fd, FPURegister fs); + void Ceil_s_s(FPURegister fd, FPURegister fs); + + // FP32 mode: Move the general purpose register into + // the high part of the double-register pair. + // FP64 mode: Move the general-purpose register into + // the higher 32 bits of the 64-bit coprocessor register, + // while leaving the low bits unchanged. + void Mthc1(Register rt, FPURegister fs); + + // FP32 mode: move the high part of the double-register pair into + // general purpose register. + // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into + // general-purpose register. + void Mfhc1(Register rt, FPURegister fs); + + void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, + FPURegister scratch); + void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, + FPURegister scratch); + void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, + FPURegister scratch); + void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, + FPURegister scratch); + + // Change endianness + void ByteSwapSigned(Register dest, Register src, int operand_size); + void ByteSwapUnsigned(Register dest, Register src, int operand_size); + + void Ulh(Register rd, const MemOperand& rs); + void Ulhu(Register rd, const MemOperand& rs); + void Ush(Register rd, const MemOperand& rs, Register scratch); + + void Ulw(Register rd, const MemOperand& rs); + void Usw(Register rd, const MemOperand& rs); + + void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch); + void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch); + + void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch); + void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch); + + void Ldc1(FPURegister fd, const MemOperand& src); + void Sdc1(FPURegister fs, const MemOperand& dst); + + void Ll(Register rd, const MemOperand& rs); + void Sc(Register rd, const MemOperand& rs); + + // Perform a floating-point min or max operation with the + // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt. + // Some cases, typically NaNs or +/-0.0, are expected to be rare and are + // handled in out-of-line code. The specific behaviour depends on supported + // instructions. + // + // These functions assume (and assert) that src1!=src2. It is permitted + // for the result to alias either input register. 
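  // (Editorial note: illustrative only, not part of the upstream file.)
  // A typical pairing of the fast-path macro with its out-of-line helper,
  // using hypothetical labels:
  //   Label ool, done;
  //   Float64Max(dst, lhs, rhs, &ool);      // ordinary inputs handled inline
  //   Branch(&done);
  //   bind(&ool);
  //   Float64MaxOutOfLine(dst, lhs, rhs);   // NaN and +/-0.0 cases
  //   bind(&done);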
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, + Label* out_of_line); + void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, + Label* out_of_line); + void Float64Max(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2, + Label* out_of_line); + void Float64Min(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2, + Label* out_of_line); + + // Generate out-of-line cases for the macros above. + void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); + void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); + void Float64MaxOutOfLine(DoubleRegister dst, DoubleRegister src1, + DoubleRegister src2); + void Float64MinOutOfLine(DoubleRegister dst, DoubleRegister src1, + DoubleRegister src2); + + bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; } + + void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); } + + inline void Move(Register dst, Handle handle) { li(dst, handle); } + inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); } + + inline void Move(Register dst, Register src) { + if (dst != src) { + mov(dst, src); + } + } + + inline void Move_d(FPURegister dst, FPURegister src) { + if (dst != src) { + mov_d(dst, src); + } + } + + inline void Move_s(FPURegister dst, FPURegister src) { + if (dst != src) { + mov_s(dst, src); + } + } + + inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); } + + inline void Move(Register dst_low, Register dst_high, FPURegister src) { + mfc1(dst_low, src); + Mfhc1(dst_high, src); + } + + inline void FmoveHigh(Register dst_high, FPURegister src) { + Mfhc1(dst_high, src); + } + + inline void FmoveHigh(FPURegister dst, Register src_high) { + Mthc1(src_high, dst); + } + + inline void FmoveLow(Register dst_low, FPURegister src) { + mfc1(dst_low, src); + } + + void FmoveLow(FPURegister dst, Register src_low); + + inline void Move(FPURegister dst, Register src_low, Register src_high) { + mtc1(src_low, dst); + Mthc1(src_high, dst); + } + + void Move(FPURegister dst, float imm) { Move(dst, bit_cast(imm)); } + void Move(FPURegister dst, double imm) { Move(dst, bit_cast(imm)); } + void Move(FPURegister dst, uint32_t src); + void Move(FPURegister dst, uint64_t src); + + // ------------------------------------------------------------------------- + // Overflow operations. + + // AddOverflow sets overflow register to a negative value if + // overflow occured, otherwise it is zero or positive + void AddOverflow(Register dst, Register left, const Operand& right, + Register overflow); + // SubOverflow sets overflow register to a negative value if + // overflow occured, otherwise it is zero or positive + void SubOverflow(Register dst, Register left, const Operand& right, + Register overflow); + // MulOverflow sets overflow register to zero if no overflow occured + void MulOverflow(Register dst, Register left, const Operand& right, + Register overflow); + +// Number of instructions needed for calculation of switch table entry address +#ifdef _MIPS_ARCH_MIPS32R6 + static constexpr int kSwitchTablePrologueSize = 5; +#else + static constexpr int kSwitchTablePrologueSize = 10; +#endif + // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a + // functor/function with 'Label *func(size_t index)' declaration. + template + void GenerateSwitchTable(Register index, size_t case_count, + Func GetLabelFunction); + + // Load an object from the root table. 
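  // (Editorial note: illustrative only, not part of the upstream file.)
  // Typical use of the overflow helpers above: the sign of the overflow
  // register encodes the outcome, so a caller can write, e.g. (with a
  // hypothetical label):
  //   AddOverflow(v0, a0, Operand(a1), t8);
  //   Branch(&deopt, lt, t8, Operand(zero_reg));  // taken iff the add overflowed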
+ void LoadRoot(Register destination, RootIndex index) final; + void LoadRoot(Register destination, RootIndex index, Condition cond, + Register src1, const Operand& src2); + + void LoadMap(Register destination, Register object); + + // If the value is a NaN, canonicalize the value else, do nothing. + void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src); + + // --------------------------------------------------------------------------- + // FPU macros. These do not handle special cases like NaN or +- inf. + + // Convert unsigned word to double. + void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch); + + // Convert double to unsigned word. + void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch); + void Trunc_uw_d(Register rd, FPURegister fs, FPURegister scratch); + + // Jump the register contains a smi. + void JumpIfSmi(Register value, Label* smi_label, + BranchDelaySlot bd = PROTECT); + + void JumpIfEqual(Register a, int32_t b, Label* dest) { + li(kScratchReg, Operand(b)); + Branch(dest, eq, a, Operand(kScratchReg)); + } + + void JumpIfLessThan(Register a, int32_t b, Label* dest) { + li(kScratchReg, Operand(b)); + Branch(dest, lt, a, Operand(kScratchReg)); + } + + // Push a standard frame, consisting of ra, fp, context and JS function. + void PushStandardFrame(Register function_reg); + + // Get the actual activation frame alignment for target environment. + static int ActivationFrameAlignment(); + + // Compute the start of the generated instruction stream from the current PC. + // This is an alternative to embedding the {CodeObject} handle as a reference. + void ComputeCodeStartAddress(Register dst); + + // Control-flow integrity: + + // Define a function entrypoint. This doesn't emit any code for this + // architecture, as control-flow integrity is not supported for it. + void CodeEntry() {} + // Define an exception handler. + void ExceptionHandler() {} + // Define an exception handler and bind a label. + void BindExceptionHandler(Label* label) { bind(label); } + + protected: + void BranchLong(Label* L, BranchDelaySlot bdslot); + + inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch); + + inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits); + + private: + bool has_double_zero_reg_set_ = false; + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it + // succeeds, otherwise falls through if result is saturated. On return + // 'result' either holds answer, or is clobbered on fall through. + void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, + Label* done); + + void CallCFunctionHelper(Register function_base, int16_t function_offset, + int num_reg_arguments, int num_double_arguments); + + void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1, + FPURegister cmp2); + + void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, + FPURegister cmp2); + + void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, + MSARegister wt, BranchDelaySlot bd = PROTECT); + + // TODO(mips) Reorder parameters so out parameters come last. 
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt); + + void BranchShortHelperR6(int32_t offset, Label* L); + void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot); + bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot); + bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot); + + void BranchAndLinkShortHelperR6(int32_t offset, Label* L); + void BranchAndLinkShortHelper(int16_t offset, Label* L, + BranchDelaySlot bdslot); + void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT); + void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT); + bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond, + Register rs, const Operand& rt, + BranchDelaySlot bdslot); + bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt, + BranchDelaySlot bdslot); + void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot); + + template + void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode, + RoundFunc round); + + template + void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode, + RoundFunc round); + + // Push a fixed frame, consisting of ra, fp. + void PushCommonFrame(Register marker_reg = no_reg); +}; + +// MacroAssembler implements a collection of frequently used macros. +class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { + public: + using TurboAssembler::TurboAssembler; + + // It assumes that the arguments are located below the stack pointer. + // argc is the number of arguments not including the receiver. + // TODO(victorgomes): Remove this function once we stick with the reversed + // arguments order. + void LoadReceiver(Register dest, Register argc) { + Lw(dest, MemOperand(sp, 0)); + } + + void StoreReceiver(Register rec, Register argc, Register scratch) { + Sw(rec, MemOperand(sp, 0)); + } + + // Swap two registers. If the scratch register is omitted then a slightly + // less efficient form using xor instead of mov is emitted. + void Swap(Register reg1, Register reg2, Register scratch = no_reg); + + void PushRoot(RootIndex index) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + LoadRoot(scratch, index); + Push(scratch); + } + + // Compare the object in a register to a value and jump if they are equal. + void JumpIfRoot(Register with, RootIndex index, Label* if_equal) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + LoadRoot(scratch, index); + Branch(if_equal, eq, with, Operand(scratch)); + } + + // Compare the object in a register to a value and jump if they are not equal. + void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + LoadRoot(scratch, index); + Branch(if_not_equal, ne, with, Operand(scratch)); + } + + // Checks if value is in range [lower_limit, higher_limit] using a single + // comparison. 
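  // (Editorial note: illustrative only, not part of the upstream file.)
  // The single-comparison form relies on the usual unsigned-range trick:
  //   value in [lower_limit, higher_limit]
  //     <=>  (unsigned)(value - lower_limit) <= (higher_limit - lower_limit)
  // so one subtraction plus one unsigned branch suffices.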
+ void JumpIfIsInRange(Register value, unsigned lower_limit, + unsigned higher_limit, Label* on_in_range); + + // --------------------------------------------------------------------------- + // GC Support + + // Notify the garbage collector that we wrote a pointer into an object. + // |object| is the object being stored into, |value| is the object being + // stored. value and scratch registers are clobbered by the operation. + // The offset is the offset from the start of the object, not the offset from + // the tagged HeapObject pointer. For use with FieldOperand(reg, off). + void RecordWriteField( + Register object, int offset, Register value, Register scratch, + RAStatus ra_status, SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = RememberedSetAction::kEmit, + SmiCheck smi_check = SmiCheck::kInline); + + // For a given |object| notify the garbage collector that the slot |address| + // has been written. |value| is the object being stored. The value and + // address registers are clobbered by the operation. + void RecordWrite( + Register object, Register address, Register value, RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = RememberedSetAction::kEmit, + SmiCheck smi_check = SmiCheck::kInline); + + void Pref(int32_t hint, const MemOperand& rs); + + // Enter exit frame. + // argc - argument count to be dropped by LeaveExitFrame. + // save_doubles - saves FPU registers on stack, currently disabled. + // stack_space - extra stack space. + void EnterExitFrame(bool save_doubles, int stack_space = 0, + StackFrame::Type frame_type = StackFrame::EXIT); + + // Leave the current exit frame. + void LeaveExitFrame(bool save_doubles, Register arg_count, + bool do_return = NO_EMIT_RETURN, + bool argument_count_is_length = false); + + // Make sure the stack is aligned. Only emits code in debug mode. + void AssertStackIsAligned(); + + // Load the global proxy from the current context. + void LoadGlobalProxy(Register dst) { + LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX); + } + + void LoadNativeContextSlot(Register dst, int index); + + // ------------------------------------------------------------------------- + // JavaScript invokes. + + // Invoke the JavaScript function code by either calling or jumping. + void InvokeFunctionCode(Register function, Register new_target, + Register expected_parameter_count, + Register actual_parameter_count, InvokeType type); + + // On function call, call into the debugger if necessary. + void CheckDebugHook(Register fun, Register new_target, + Register expected_parameter_count, + Register actual_parameter_count); + + // Invoke the JavaScript function in the given register. Changes the + // current context to the context in the function before invoking. + void InvokeFunctionWithNewTarget(Register function, Register new_target, + Register actual_parameter_count, + InvokeType type); + + void InvokeFunction(Register function, Register expected_parameter_count, + Register actual_parameter_count, InvokeType type); + + // Exception handling. + + // Push a new stack handler and link into stack handler chain. + void PushStackHandler(); + + // Unlink the stack handler on top of the stack from the stack handler chain. + // Must preserve the result register. + void PopStackHandler(); + + // ------------------------------------------------------------------------- + // Support functions. 
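  // (Editorial note: illustrative only, not part of the upstream file.)
  // Sketch of how the write-barrier helpers above are meant to be used after
  // storing a tagged value into a heap object field (register choices are
  // hypothetical):
  //   Sw(value, FieldMemOperand(object, offset));
  //   RecordWriteField(object, offset, value, scratch, kRAHasNotBeenSaved,
  //                    SaveFPRegsMode::kIgnore);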
+ + void GetObjectType(Register function, Register map, Register type_reg); + + void GetInstanceTypeRange(Register map, Register type_reg, + InstanceType lower_limit, Register range); + + // ------------------------------------------------------------------------- + // Runtime calls. + + // Call a runtime routine. + void CallRuntime(const Runtime::Function* f, int num_arguments, + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); + + // Convenience function: Same as above, but takes the fid instead. + void CallRuntime(Runtime::FunctionId fid, + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { + const Runtime::Function* function = Runtime::FunctionForId(fid); + CallRuntime(function, function->nargs, save_doubles); + } + + // Convenience function: Same as above, but takes the fid instead. + void CallRuntime(Runtime::FunctionId id, int num_arguments, + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { + CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles); + } + + // Convenience function: tail call a runtime routine (jump). + void TailCallRuntime(Runtime::FunctionId fid); + + // Jump to the builtin routine. + void JumpToExternalReference(const ExternalReference& builtin, + BranchDelaySlot bd = PROTECT, + bool builtin_exit_frame = false); + + // Generates a trampoline to jump to the off-heap instruction stream. + void JumpToOffHeapInstructionStream(Address entry); + + // --------------------------------------------------------------------------- + // In-place weak references. + void LoadWeakValue(Register out, Register in, Label* target_if_cleared); + + // ------------------------------------------------------------------------- + // StatsCounter support. + + void IncrementCounter(StatsCounter* counter, int value, Register scratch1, + Register scratch2) { + if (!FLAG_native_code_counters) return; + EmitIncrementCounter(counter, value, scratch1, scratch2); + } + void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1, + Register scratch2); + void DecrementCounter(StatsCounter* counter, int value, Register scratch1, + Register scratch2) { + if (!FLAG_native_code_counters) return; + EmitDecrementCounter(counter, value, scratch1, scratch2); + } + void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1, + Register scratch2); + + // ------------------------------------------------------------------------- + // Stack limit utilities + + enum StackLimitKind { kInterruptStackLimit, kRealStackLimit }; + void LoadStackLimit(Register destination, StackLimitKind kind); + void StackOverflowCheck(Register num_args, Register scratch1, + Register scratch2, Label* stack_overflow); + + // --------------------------------------------------------------------------- + // Smi utilities. + + void SmiTag(Register reg) { Addu(reg, reg, reg); } + + void SmiTag(Register dst, Register src) { Addu(dst, src, src); } + + // Test if the register contains a smi. + inline void SmiTst(Register value, Register scratch) { + And(scratch, value, Operand(kSmiTagMask)); + } + + // Jump if the register contains a non-smi. + void JumpIfNotSmi(Register value, Label* not_smi_label, + BranchDelaySlot bd = PROTECT); + + // Abort execution if argument is a smi, enabled via --debug-code. + void AssertNotSmi(Register object); + void AssertSmi(Register object); + + // Abort execution if argument is not a Constructor, enabled via --debug-code. + void AssertConstructor(Register object); + + // Abort execution if argument is not a JSFunction, enabled via --debug-code. 
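  // (Editorial note: illustrative only, not part of the upstream file.)
  // The Smi helpers above rely on the 32-bit Smi encoding (tag 0, shift 1):
  //   SmiTag:    reg = value + value        // i.e. value << 1
  //   SmiUntag:  reg = reg >> 1 (arithmetic), recovering the signed value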
+ void AssertFunction(Register object); + + // Abort execution if argument is not a callable JSFunction, enabled via + // --debug-code. + void AssertCallableFunction(Register object); + + // Abort execution if argument is not a JSBoundFunction, + // enabled via --debug-code. + void AssertBoundFunction(Register object); + + // Abort execution if argument is not a JSGeneratorObject (or subclass), + // enabled via --debug-code. + void AssertGeneratorObject(Register object); + + // Abort execution if argument is not undefined or an AllocationSite, enabled + // via --debug-code. + void AssertUndefinedOrAllocationSite(Register object, Register scratch); + + template + void DecodeField(Register dst, Register src) { + Ext(dst, src, Field::kShift, Field::kSize); + } + + template + void DecodeField(Register reg) { + DecodeField(reg, reg); + } + + private: + // Helper functions for generating invokes. + void InvokePrologue(Register expected_parameter_count, + Register actual_parameter_count, Label* done, + InvokeType type); + + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); +}; + +template +void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, + Func GetLabelFunction) { + Label here; + BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + if (kArchVariant >= kMips32r6) { + addiupc(scratch, 5); + Lsa(scratch, scratch, index, kPointerSizeLog2); + lw(scratch, MemOperand(scratch)); + } else { + push(ra); + bal(&here); + sll(scratch, index, kPointerSizeLog2); // Branch delay slot. + bind(&here); + addu(scratch, scratch, ra); + pop(ra); + lw(scratch, MemOperand(scratch, 6 * v8::internal::kInstrSize)); + } + jr(scratch); + nop(); // Branch delay slot nop. + for (size_t index = 0; index < case_count; ++index) { + dd(GetLabelFunction(index)); + } +} + +#define ACCESS_MASM(masm) masm-> + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_ diff --git a/deps/v8/src/codegen/mips/register-mips.h b/deps/v8/src/codegen/mips/register-mips.h new file mode 100644 index 00000000000000..26f04401b92fc2 --- /dev/null +++ b/deps/v8/src/codegen/mips/register-mips.h @@ -0,0 +1,299 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CODEGEN_MIPS_REGISTER_MIPS_H_ +#define V8_CODEGEN_MIPS_REGISTER_MIPS_H_ + +#include "src/codegen/mips/constants-mips.h" +#include "src/codegen/register-base.h" + +namespace v8 { +namespace internal { + +// clang-format off +#define GENERAL_REGISTERS(V) \ + V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \ + V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) \ + V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9) \ + V(k0) V(k1) V(gp) V(sp) V(fp) V(ra) + +#define ALLOCATABLE_GENERAL_REGISTERS(V) \ + V(a0) V(a1) V(a2) V(a3) \ + V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(s7) \ + V(v0) V(v1) + +#define DOUBLE_REGISTERS(V) \ + V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \ + V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \ + V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \ + V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31) + +// Currently, MIPS just use even float point register, except +// for C function param registers. 
+#define DOUBLE_USE_REGISTERS(V) \ + V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f13) \ + V(f14) V(f15) V(f16) V(f18) V(f20) V(f22) V(f24) V(f26) \ + V(f28) V(f30) + +#define FLOAT_REGISTERS DOUBLE_REGISTERS +#define SIMD128_REGISTERS(V) \ + V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \ + V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \ + V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \ + V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31) + +#define ALLOCATABLE_DOUBLE_REGISTERS(V) \ + V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \ + V(f16) V(f18) V(f20) V(f22) V(f24) +// clang-format on + +// Register lists. +// Note that the bit values must match those used in actual instruction +// encoding. +const int kNumRegs = 32; + +// CPU Registers. +// +// 1) We would prefer to use an enum, but enum values are assignment- +// compatible with int, which has caused code-generation bugs. +// +// 2) We would prefer to use a class instead of a struct but we don't like +// the register initialization to depend on the particular initialization +// order (which appears to be different on OS X, Linux, and Windows for the +// installed versions of C++ we tried). Using a struct permits C-style +// "initialization". Also, the Register objects cannot be const as this +// forces initialization stubs in MSVC, making us dependent on initialization +// order. +// +// 3) By not using an enum, we are possibly preventing the compiler from +// doing certain constant folds, which may significantly reduce the +// code generated for some assembly instructions (because they boil down +// to a few constants). If this is a problem, we could change the code +// such that we use an enum in optimized mode, and the struct in debug +// mode. This way we get the compile-time error checking in debug mode +// and best performance in optimized code. + +// ----------------------------------------------------------------------------- +// Implementation of Register and FPURegister. + +enum RegisterCode { +#define REGISTER_CODE(R) kRegCode_##R, + GENERAL_REGISTERS(REGISTER_CODE) +#undef REGISTER_CODE + kRegAfterLast +}; + +class Register : public RegisterBase { + public: +#if defined(V8_TARGET_LITTLE_ENDIAN) + static constexpr int kMantissaOffset = 0; + static constexpr int kExponentOffset = 4; +#elif defined(V8_TARGET_BIG_ENDIAN) + static constexpr int kMantissaOffset = 4; + static constexpr int kExponentOffset = 0; +#else +#error Unknown endianness +#endif + + private: + friend class RegisterBase; + explicit constexpr Register(int code) : RegisterBase(code) {} +}; + +// s7: context register +// s3: scratch register +// s4: scratch register 2 +#define DECLARE_REGISTER(R) \ + constexpr Register R = Register::from_code(kRegCode_##R); +GENERAL_REGISTERS(DECLARE_REGISTER) +#undef DECLARE_REGISTER +constexpr Register no_reg = Register::no_reg(); + +int ToNumber(Register reg); + +Register ToRegister(int num); + +// Returns the number of padding slots needed for stack pointer alignment. +constexpr int ArgumentPaddingSlots(int argument_count) { + // No argument padding required. + return 0; +} + +constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap; +constexpr bool kSimdMaskRegisters = false; + +enum DoubleRegisterCode { +#define REGISTER_CODE(R) kDoubleCode_##R, + DOUBLE_REGISTERS(REGISTER_CODE) +#undef REGISTER_CODE + kDoubleAfterLast +}; + +// Coprocessor register. 
+class FPURegister : public RegisterBase { + public: + FPURegister low() const { + // Find low reg of a Double-reg pair, which is the reg itself. + DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even. + return FPURegister::from_code(code()); + } + FPURegister high() const { + // Find high reg of a Doubel-reg pair, which is reg + 1. + DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even. + return FPURegister::from_code(code() + 1); + } + + private: + friend class RegisterBase; + explicit constexpr FPURegister(int code) : RegisterBase(code) {} +}; + +enum MSARegisterCode { +#define REGISTER_CODE(R) kMsaCode_##R, + SIMD128_REGISTERS(REGISTER_CODE) +#undef REGISTER_CODE + kMsaAfterLast +}; + +// MIPS SIMD (MSA) register +class MSARegister : public RegisterBase { + friend class RegisterBase; + explicit constexpr MSARegister(int code) : RegisterBase(code) {} +}; + +// A few double registers are reserved: one as a scratch register and one to +// hold 0.0. +// f28: 0.0 +// f30: scratch register. + +// V8 now supports the O32 ABI, and the FPU Registers are organized as 32 +// 32-bit registers, f0 through f31. When used as 'double' they are used +// in pairs, starting with the even numbered register. So a double operation +// on f0 really uses f0 and f1. +// (Modern mips hardware also supports 32 64-bit registers, via setting +// (priviledged) Status Register FR bit to 1. This is used by the N32 ABI, +// but it is not in common use. Someday we will want to support this in v8.) + +// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers. +using FloatRegister = FPURegister; + +using DoubleRegister = FPURegister; + +#define DECLARE_DOUBLE_REGISTER(R) \ + constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R); +DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER) +#undef DECLARE_DOUBLE_REGISTER + +constexpr DoubleRegister no_dreg = DoubleRegister::no_reg(); + +// SIMD registers. +using Simd128Register = MSARegister; + +#define DECLARE_SIMD128_REGISTER(R) \ + constexpr Simd128Register R = Simd128Register::from_code(kMsaCode_##R); +SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER) +#undef DECLARE_SIMD128_REGISTER + +const Simd128Register no_msareg = Simd128Register::no_reg(); + +// Register aliases. +// cp is assumed to be a callee saved register. +constexpr Register kRootRegister = s6; +constexpr Register cp = s7; +constexpr Register kScratchReg = s3; +constexpr Register kScratchReg2 = s4; +constexpr DoubleRegister kScratchDoubleReg = f30; +constexpr DoubleRegister kDoubleRegZero = f28; +// Used on mips32r6 for compare operations. +constexpr DoubleRegister kDoubleCompareReg = f26; +// MSA zero and scratch regs must have the same numbers as FPU zero and scratch +constexpr Simd128Register kSimd128RegZero = w28; +constexpr Simd128Register kSimd128ScratchReg = w30; + +// FPU (coprocessor 1) control registers. +// Currently only FCSR (#31) is implemented. +struct FPUControlRegister { + bool is_valid() const { return reg_code == kFCSRRegister; } + bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; } + int code() const { + DCHECK(is_valid()); + return reg_code; + } + int bit() const { + DCHECK(is_valid()); + return 1 << reg_code; + } + void setcode(int f) { + reg_code = f; + DCHECK(is_valid()); + } + // Unfortunately we can't make this private in a struct. 
+ int reg_code; +}; + +constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister}; +constexpr FPUControlRegister FCSR = {kFCSRRegister}; + +// MSA control registers +struct MSAControlRegister { + bool is_valid() const { + return (reg_code == kMSAIRRegister) || (reg_code == kMSACSRRegister); + } + bool is(MSAControlRegister creg) const { return reg_code == creg.reg_code; } + int code() const { + DCHECK(is_valid()); + return reg_code; + } + int bit() const { + DCHECK(is_valid()); + return 1 << reg_code; + } + void setcode(int f) { + reg_code = f; + DCHECK(is_valid()); + } + // Unfortunately we can't make this private in a struct. + int reg_code; +}; + +constexpr MSAControlRegister no_msacreg = {kInvalidMSAControlRegister}; +constexpr MSAControlRegister MSAIR = {kMSAIRRegister}; +constexpr MSAControlRegister MSACSR = {kMSACSRRegister}; + +// Define {RegisterName} methods for the register types. +DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS) +DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS) +DEFINE_REGISTER_NAMES(MSARegister, SIMD128_REGISTERS) + +// Give alias names to registers for calling conventions. +constexpr Register kReturnRegister0 = v0; +constexpr Register kReturnRegister1 = v1; +constexpr Register kReturnRegister2 = a0; +constexpr Register kJSFunctionRegister = a1; +constexpr Register kContextRegister = s7; +constexpr Register kAllocateSizeRegister = a0; +constexpr Register kInterpreterAccumulatorRegister = v0; +constexpr Register kInterpreterBytecodeOffsetRegister = t4; +constexpr Register kInterpreterBytecodeArrayRegister = t5; +constexpr Register kInterpreterDispatchTableRegister = t6; + +constexpr Register kJavaScriptCallArgCountRegister = a0; +constexpr Register kJavaScriptCallCodeStartRegister = a2; +constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister; +constexpr Register kJavaScriptCallNewTargetRegister = a3; +constexpr Register kJavaScriptCallExtraArg1Register = a2; + +constexpr Register kOffHeapTrampolineRegister = at; +constexpr Register kRuntimeCallFunctionRegister = a1; +constexpr Register kRuntimeCallArgCountRegister = a0; +constexpr Register kRuntimeCallArgvRegister = a2; +constexpr Register kWasmInstanceRegister = a0; +constexpr Register kWasmCompileLazyFuncIndexRegister = t0; + +constexpr DoubleRegister kFPReturnRegister0 = f0; + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_MIPS_REGISTER_MIPS_H_ diff --git a/deps/v8/src/codegen/mips/reglist-mips.h b/deps/v8/src/codegen/mips/reglist-mips.h new file mode 100644 index 00000000000000..5c458858f6afdf --- /dev/null +++ b/deps/v8/src/codegen/mips/reglist-mips.h @@ -0,0 +1,48 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CODEGEN_MIPS_REGLIST_MIPS_H_ +#define V8_CODEGEN_MIPS_REGLIST_MIPS_H_ + +#include "src/codegen/mips/constants-mips.h" +#include "src/codegen/register-arch.h" +#include "src/codegen/reglist-base.h" + +namespace v8 { +namespace internal { + +using RegList = RegListBase; +using DoubleRegList = RegListBase; +ASSERT_TRIVIALLY_COPYABLE(RegList); +ASSERT_TRIVIALLY_COPYABLE(DoubleRegList); + +const RegList kJSCallerSaved = {v0, v1, a0, a1, a2, a3, t0, + t1, t2, t3, t4, t5, t6, t7}; + +const int kNumJSCallerSaved = 14; + +// Callee-saved registers preserved when switching from C to JavaScript. 
+const RegList kCalleeSaved = {s0, // s0 + s1, // s1 + s2, // s2 + s3, // s3 + s4, // s4 + s5, // s5 + s6, // s6 (roots in Javascript code) + s7, // s7 (cp in Javascript code) + fp}; // fp/s8 + +const int kNumCalleeSaved = 9; + +const DoubleRegList kCalleeSavedFPU = {f20, f22, f24, f26, f28, f30}; + +const int kNumCalleeSavedFPU = 6; + +const DoubleRegList kCallerSavedFPU = {f0, f2, f4, f6, f8, + f10, f12, f14, f16, f18}; + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_MIPS_REGLIST_MIPS_H_ diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h new file mode 100644 index 00000000000000..f463c9a98739eb --- /dev/null +++ b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h @@ -0,0 +1,327 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2021 the V8 project authors. All rights reserved. + +#ifndef V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_ +#define V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_ + +#include "src/codegen/assembler.h" +#include "src/codegen/riscv64/assembler-riscv64.h" +#include "src/debug/debug.h" +#include "src/objects/objects-inl.h" + +namespace v8 { +namespace internal { + +bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); } + +// ----------------------------------------------------------------------------- +// Operand and MemOperand. + +bool Operand::is_reg() const { return rm_.is_valid(); } + +int64_t Operand::immediate() const { + DCHECK(!is_reg()); + DCHECK(!IsHeapObjectRequest()); + return value_.immediate; +} + +// ----------------------------------------------------------------------------- +// RelocInfo. + +void RelocInfo::apply(intptr_t delta) { + if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) { + // Absolute code pointer inside code object moves with the code object. 
+ Assembler::RelocateInternalReference(rmode_, pc_, delta); + } else { + DCHECK(IsRelativeCodeTarget(rmode_)); + Assembler::RelocateRelativeReference(rmode_, pc_, delta); + } +} + +Address RelocInfo::target_address() { + DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) || + IsWasmCall(rmode_)); + return Assembler::target_address_at(pc_, constant_pool_); +} + +Address RelocInfo::target_address_address() { + DCHECK(HasTargetAddressAddress()); + // Read the address of the word containing the target_address in an + // instruction stream. + // The only architecture-independent user of this function is the serializer. + // The serializer uses it to find out how many raw bytes of instruction to + // output before the next target. + // For an instruction like LUI/ORI where the target bits are mixed into the + // instruction bits, the size of the target will be zero, indicating that the + // serializer should not step forward in memory after a target is resolved + // and written. In this case the target_address_address function should + // return the end of the instructions to be patched, allowing the + // deserializer to deserialize the instructions as raw bytes and put them in + // place, ready to be patched with the target. After jump optimization, + // that is the address of the instruction that follows J/JAL/JR/JALR + // instruction. + return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize; +} + +Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); } + +int RelocInfo::target_address_size() { + if (IsCodedSpecially()) { + return Assembler::kSpecialTargetSize; + } else { + return kSystemPointerSize; + } +} + +void Assembler::set_target_compressed_address_at( + Address pc, Address constant_pool, Tagged_t target, + ICacheFlushMode icache_flush_mode) { + Assembler::set_target_address_at( + pc, constant_pool, static_cast
<Address>
(target), icache_flush_mode); +} + +Tagged_t Assembler::target_compressed_address_at(Address pc, + Address constant_pool) { + return static_cast(target_address_at(pc, constant_pool)); +} + +Handle Assembler::code_target_object_handle_at(Address pc, + Address constant_pool) { + int index = + static_cast(target_address_at(pc, constant_pool)) & 0xFFFFFFFF; + return GetCodeTarget(index); +} + +Handle Assembler::compressed_embedded_object_handle_at( + Address pc, Address const_pool) { + return GetEmbeddedObject(target_compressed_address_at(pc, const_pool)); +} + +void Assembler::deserialization_set_special_target_at( + Address instruction_payload, Code code, Address target) { + set_target_address_at(instruction_payload, + !code.is_null() ? code.constant_pool() : kNullAddress, + target); +} + +int Assembler::deserialization_special_target_size( + Address instruction_payload) { + return kSpecialTargetSize; +} + +void Assembler::set_target_internal_reference_encoded_at(Address pc, + Address target) { + set_target_value_at(pc, static_cast(target)); +} + +void Assembler::deserialization_set_target_internal_reference_at( + Address pc, Address target, RelocInfo::Mode mode) { + if (RelocInfo::IsInternalReferenceEncoded(mode)) { + DCHECK(IsLui(instr_at(pc))); + set_target_internal_reference_encoded_at(pc, target); + } else { + DCHECK(RelocInfo::IsInternalReference(mode)); + Memory
(pc) = target; + } +} + +HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { + DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); + if (IsDataEmbeddedObject(rmode_)) { + return HeapObject::cast(Object(ReadUnalignedValue
(pc_))); + } else if (IsCompressedEmbeddedObject(rmode_)) { + return HeapObject::cast(Object(DecompressTaggedAny( + cage_base, + Assembler::target_compressed_address_at(pc_, constant_pool_)))); + } else { + return HeapObject::cast( + Object(Assembler::target_address_at(pc_, constant_pool_))); + } +} + +Handle RelocInfo::target_object_handle(Assembler* origin) { + if (IsDataEmbeddedObject(rmode_)) { + return Handle::cast(ReadUnalignedValue>(pc_)); + } else if (IsCodeTarget(rmode_)) { + return Handle::cast( + origin->code_target_object_handle_at(pc_, constant_pool_)); + } else if (IsCompressedEmbeddedObject(rmode_)) { + return origin->compressed_embedded_object_handle_at(pc_, constant_pool_); + } else if (IsFullEmbeddedObject(rmode_)) { + return Handle(reinterpret_cast( + Assembler::target_address_at(pc_, constant_pool_))); + } else { + DCHECK(IsRelativeCodeTarget(rmode_)); + return origin->relative_code_target_object_handle_at(pc_); + } +} + +void RelocInfo::set_target_object(Heap* heap, HeapObject target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); + if (IsDataEmbeddedObject(rmode_)) { + WriteUnalignedValue(pc_, target.ptr()); + // No need to flush icache since no instructions were changed. + } else if (IsCompressedEmbeddedObject(rmode_)) { + Assembler::set_target_compressed_address_at( + pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode); + } else { + DCHECK(IsFullEmbeddedObject(rmode_)); + Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), + icache_flush_mode); + } + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { + WriteBarrierForCode(host(), this, target); + } +} + +Address RelocInfo::target_external_reference() { + DCHECK(rmode_ == EXTERNAL_REFERENCE); + return Assembler::target_address_at(pc_, constant_pool_); +} + +void RelocInfo::set_target_external_reference( + Address target, ICacheFlushMode icache_flush_mode) { + DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE); + Assembler::set_target_address_at(pc_, constant_pool_, target, + icache_flush_mode); +} + +Address RelocInfo::target_internal_reference() { + if (IsInternalReference(rmode_)) { + return Memory
(pc_); + } else { + // Encoded internal references are j/jal instructions. + DCHECK(IsInternalReferenceEncoded(rmode_)); + DCHECK(Assembler::IsLui(Assembler::instr_at(pc_ + 0 * kInstrSize))); + Address address = Assembler::target_address_at(pc_); + return address; + } +} + +Address RelocInfo::target_internal_reference_address() { + DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)); + return pc_; +} + +Handle Assembler::relative_code_target_object_handle_at( + Address pc) const { + Instr instr1 = Assembler::instr_at(pc); + Instr instr2 = Assembler::instr_at(pc + kInstrSize); + DCHECK(IsAuipc(instr1)); + DCHECK(IsJalr(instr2)); + int32_t code_target_index = BrachlongOffset(instr1, instr2); + return GetCodeTarget(code_target_index); +} + +Address RelocInfo::target_runtime_entry(Assembler* origin) { + DCHECK(IsRuntimeEntry(rmode_)); + return target_address(); +} + +void RelocInfo::set_target_runtime_entry(Address target, + WriteBarrierMode write_barrier_mode, + ICacheFlushMode icache_flush_mode) { + DCHECK(IsRuntimeEntry(rmode_)); + if (target_address() != target) + set_target_address(target, write_barrier_mode, icache_flush_mode); +} + +Address RelocInfo::target_off_heap_target() { + DCHECK(IsOffHeapTarget(rmode_)); + return Assembler::target_address_at(pc_, constant_pool_); +} + +void RelocInfo::WipeOut() { + DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) || + IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) || + IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) || + IsOffHeapTarget(rmode_)); + if (IsInternalReference(rmode_)) { + Memory
(pc_) = kNullAddress; + } else if (IsInternalReferenceEncoded(rmode_)) { + Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress); + } else { + Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress); + } +} + +// ----------------------------------------------------------------------------- +// Assembler. + +void Assembler::CheckBuffer() { + if (buffer_space() <= kGap) { + GrowBuffer(); + } +} + +template +void Assembler::EmitHelper(T x) { + *reinterpret_cast(pc_) = x; + pc_ += sizeof(x); +} + +void Assembler::emit(Instr x) { + if (!is_buffer_growth_blocked()) { + CheckBuffer(); + } + DEBUG_PRINTF("%p: ", pc_); + disassembleInstr(x); + EmitHelper(x); + CheckTrampolinePoolQuick(); +} + +void Assembler::emit(ShortInstr x) { + if (!is_buffer_growth_blocked()) { + CheckBuffer(); + } + DEBUG_PRINTF("%p: ", pc_); + disassembleInstr(x); + EmitHelper(x); + CheckTrampolinePoolQuick(); +} + +void Assembler::emit(uint64_t data) { + if (!is_buffer_growth_blocked()) CheckBuffer(); + EmitHelper(data); +} + +EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); } + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_ diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc new file mode 100644 index 00000000000000..c24fb31a7bb397 --- /dev/null +++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc @@ -0,0 +1,4095 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2021 the V8 project authors. All rights reserved. 
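// Illustrative sketch (not from the V8 sources): the emit() overloads at the
// end of assembler-riscv64-inl.h above share one pattern: CheckBuffer() grows
// the code buffer once buffer_space() drops to kGap or below, EmitHelper()
// writes the raw instruction bits at pc_ and advances pc_ by their width, and
// EnsureSpace simply triggers that check from its constructor. A stripped-down
// sketch of the same pattern, with hypothetical names (needs <cstdint>,
// <cstring>, <cassert>):
//
//   void EmitRaw(uint8_t*& pc, uint8_t* buffer_end, uint32_t instr_bits) {
//     assert(pc + sizeof(instr_bits) <= buffer_end);  // real code grows instead
//     std::memcpy(pc, &instr_bits, sizeof(instr_bits));
//     pc += sizeof(instr_bits);  // mirrors pc_ += sizeof(x) in EmitHelper
//   }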
+ +#if V8_TARGET_ARCH_RISCV64 + +#include "src/codegen/riscv64/assembler-riscv64.h" + +#include "src/base/cpu.h" +#include "src/codegen/riscv64/assembler-riscv64-inl.h" +#include "src/codegen/safepoint-table.h" +#include "src/codegen/string-constants.h" +#include "src/deoptimizer/deoptimizer.h" +#include "src/diagnostics/disasm.h" +#include "src/diagnostics/disassembler.h" +#include "src/objects/heap-number-inl.h" + +namespace v8 { +namespace internal { +// Get the CPU features enabled by the build. For cross compilation the +// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS +// can be defined to enable FPU instructions when building the +// snapshot. +static unsigned CpuFeaturesImpliedByCompiler() { + unsigned answer = 0; +#ifdef CAN_USE_FPU_INSTRUCTIONS + answer |= 1u << FPU; +#endif // def CAN_USE_FPU_INSTRUCTIONS + +#if (defined CAN_USE_RVV_INSTRUCTIONS) + answer |= 1u << RISCV_SIMD; +#endif // def CAN_USE_RVV_INSTRUCTIONS || USE_SIMULATOR + return answer; +} + +bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(RISCV_SIMD); } + +void CpuFeatures::ProbeImpl(bool cross_compile) { + supported_ |= CpuFeaturesImpliedByCompiler(); + // Only use statically determined features for cross compile (snapshot). + if (cross_compile) return; + // Probe for additional features at runtime. + base::CPU cpu; + if (cpu.has_fpu()) supported_ |= 1u << FPU; + if (cpu.has_rvv()) supported_ |= 1u << RISCV_SIMD; + // Set a static value on whether SIMD is supported. + // This variable is only used for certain archs to query SupportWasmSimd128() + // at runtime in builtins using an extern ref. Other callers should use + // CpuFeatures::SupportWasmSimd128(). + CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128(); +} + +void CpuFeatures::PrintTarget() {} +void CpuFeatures::PrintFeatures() {} +int ToNumber(Register reg) { + DCHECK(reg.is_valid()); + const int kNumbers[] = { + 0, // zero_reg + 1, // ra + 2, // sp + 3, // gp + 4, // tp + 5, // t0 + 6, // t1 + 7, // t2 + 8, // s0/fp + 9, // s1 + 10, // a0 + 11, // a1 + 12, // a2 + 13, // a3 + 14, // a4 + 15, // a5 + 16, // a6 + 17, // a7 + 18, // s2 + 19, // s3 + 20, // s4 + 21, // s5 + 22, // s6 + 23, // s7 + 24, // s8 + 25, // s9 + 26, // s10 + 27, // s11 + 28, // t3 + 29, // t4 + 30, // t5 + 31, // t6 + }; + return kNumbers[reg.code()]; +} + +Register ToRegister(int num) { + DCHECK(num >= 0 && num < kNumRegisters); + const Register kRegisters[] = { + zero_reg, ra, sp, gp, tp, t0, t1, t2, fp, s1, a0, a1, a2, a3, a4, a5, + a6, a7, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, t3, t4, t5, t6}; + return kRegisters[num]; +} + +// ----------------------------------------------------------------------------- +// Implementation of RelocInfo. + +const int RelocInfo::kApplyMask = + RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) | + RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) | + RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET); + +bool RelocInfo::IsCodedSpecially() { + // The deserializer needs to know whether a pointer is specially coded. Being + // specially coded on RISC-V means that it is a lui/addi instruction, and that + // is always the case inside code objects. 
+ return true; +} + +bool RelocInfo::IsInConstantPool() { return false; } + +uint32_t RelocInfo::wasm_call_tag() const { + DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL); + return static_cast( + Assembler::target_address_at(pc_, constant_pool_)); +} + +// ----------------------------------------------------------------------------- +// Implementation of Operand and MemOperand. +// See assembler-riscv64-inl.h for inlined constructors. + +Operand::Operand(Handle handle) + : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) { + value_.immediate = static_cast(handle.address()); +} + +Operand Operand::EmbeddedNumber(double value) { + int32_t smi; + if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi)); + Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT); + result.is_heap_object_request_ = true; + result.value_.heap_object_request = HeapObjectRequest(value); + return result; +} + +Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) { + Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT); + result.is_heap_object_request_ = true; + result.value_.heap_object_request = HeapObjectRequest(str); + return result; +} + +MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) { + offset_ = offset; +} + +MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier, + OffsetAddend offset_addend) + : Operand(rm) { + offset_ = unit * multiplier + offset_addend; +} + +void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { + DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty()); + for (auto& request : heap_object_requests_) { + Handle object; + switch (request.kind()) { + case HeapObjectRequest::kHeapNumber: + object = isolate->factory()->NewHeapNumber( + request.heap_number()); + break; + case HeapObjectRequest::kStringConstant: + const StringConstantBase* str = request.string(); + CHECK_NOT_NULL(str); + object = str->AllocateStringConstant(isolate); + break; + } + Address pc = reinterpret_cast
(buffer_start_) + request.offset(); + set_target_value_at(pc, reinterpret_cast(object.location())); + } +} + +// ----------------------------------------------------------------------------- +// Specific instructions, constants, and masks. + +Assembler::Assembler(const AssemblerOptions& options, + std::unique_ptr buffer) + : AssemblerBase(options, std::move(buffer)), + VU(this), + scratch_register_list_({t3, t5}), + constpool_(this) { + reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); + + last_trampoline_pool_end_ = 0; + no_trampoline_pool_before_ = 0; + trampoline_pool_blocked_nesting_ = 0; + // We leave space (16 * kTrampolineSlotsSize) + // for BlockTrampolinePoolScope buffer. + next_buffer_check_ = FLAG_force_long_branches + ? kMaxInt + : kMaxBranchOffset - kTrampolineSlotsSize * 16; + internal_trampoline_exception_ = false; + last_bound_pos_ = 0; + + trampoline_emitted_ = FLAG_force_long_branches; + unbound_labels_count_ = 0; + block_buffer_growth_ = false; +} + +void Assembler::AbortedCodeGeneration() { constpool_.Clear(); } +Assembler::~Assembler() { CHECK(constpool_.IsEmpty()); } + +void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, + SafepointTableBuilder* safepoint_table_builder, + int handler_table_offset) { + // As a crutch to avoid having to add manual Align calls wherever we use a + // raw workflow to create Code objects (mostly in tests), add another Align + // call here. It does no harm - the end of the Code object is aligned to the + // (larger) kCodeAlignment anyways. + // TODO(jgruber): Consider moving responsibility for proper alignment to + // metadata table builders (safepoint, handler, constant pool, code + // comments). + DataAlign(Code::kMetadataAlignment); + + ForceConstantPoolEmissionWithoutJump(); + + int code_comments_size = WriteCodeComments(); + + DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. + + AllocateAndInstallRequestedHeapObjects(isolate); + + // Set up code descriptor. + // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to + // this point to make CodeDesc initialization less fiddly. + + static constexpr int kConstantPoolSize = 0; + const int instruction_size = pc_offset(); + const int code_comments_offset = instruction_size - code_comments_size; + const int constant_pool_offset = code_comments_offset - kConstantPoolSize; + const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable) + ? constant_pool_offset + : handler_table_offset; + const int safepoint_table_offset = + (safepoint_table_builder == kNoSafepointTable) + ? handler_table_offset2 + : safepoint_table_builder->safepoint_table_offset(); + const int reloc_info_offset = + static_cast(reloc_info_writer.pos() - buffer_->start()); + CodeDesc::Initialize(desc, this, safepoint_table_offset, + handler_table_offset2, constant_pool_offset, + code_comments_offset, reloc_info_offset); +} + +void Assembler::Align(int m) { + DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m)); + while ((pc_offset() & (m - 1)) != 0) { + NOP(); + } +} + +void Assembler::CodeTargetAlign() { + // No advantage to aligning branch/call targets to more than + // single instruction, that I am aware of. + Align(4); +} + +// Labels refer to positions in the (to be) generated code. +// There are bound, linked, and unused labels. +// +// Bound labels refer to known positions in the already +// generated code. pos() is the position the label refers to. 
+// +// Linked labels refer to unknown positions in the code +// to be generated; pos() is the position of the last +// instruction using the label. + +// The link chain is terminated by a value in the instruction of 0, +// which is an otherwise illegal value (branch 0 is inf loop). When this case +// is detected, return an position of -1, an otherwise illegal position. +const int kEndOfChain = -1; +const int kEndOfJumpChain = 0; + +bool Assembler::IsBranch(Instr instr) { + return (instr & kBaseOpcodeMask) == BRANCH; +} + +bool Assembler::IsCBranch(Instr instr) { + int Op = instr & kRvcOpcodeMask; + return Op == RO_C_BNEZ || Op == RO_C_BEQZ; +} +bool Assembler::IsJump(Instr instr) { + int Op = instr & kBaseOpcodeMask; + return Op == JAL || Op == JALR; +} + +bool Assembler::IsNop(Instr instr) { return instr == kNopByte; } + +bool Assembler::IsJal(Instr instr) { return (instr & kBaseOpcodeMask) == JAL; } + +bool Assembler::IsJalr(Instr instr) { + return (instr & kBaseOpcodeMask) == JALR; +} + +bool Assembler::IsCJal(Instr instr) { + return (instr & kRvcOpcodeMask) == RO_C_J; +} + +bool Assembler::IsLui(Instr instr) { return (instr & kBaseOpcodeMask) == LUI; } +bool Assembler::IsAuipc(Instr instr) { + return (instr & kBaseOpcodeMask) == AUIPC; +} +bool Assembler::IsAddiw(Instr instr) { + return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDIW; +} +bool Assembler::IsAddi(Instr instr) { + return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDI; +} +bool Assembler::IsOri(Instr instr) { + return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ORI; +} +bool Assembler::IsSlli(Instr instr) { + return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_SLLI; +} + +bool Assembler::IsLd(Instr instr) { + return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LD; +} + +int Assembler::target_at(int pos, bool is_internal) { + if (is_internal) { + int64_t* p = reinterpret_cast(buffer_start_ + pos); + int64_t address = *p; + if (address == kEndOfJumpChain) { + return kEndOfChain; + } else { + int64_t instr_address = reinterpret_cast(p); + DCHECK(instr_address - address < INT_MAX); + int delta = static_cast(instr_address - address); + DCHECK(pos > delta); + return pos - delta; + } + } + Instruction* instruction = Instruction::At(buffer_start_ + pos); + DEBUG_PRINTF("target_at: %p (%d)\n\t", + reinterpret_cast(buffer_start_ + pos), pos); + Instr instr = instruction->InstructionBits(); + disassembleInstr(instruction->InstructionBits()); + + switch (instruction->InstructionOpcodeType()) { + case BRANCH: { + int32_t imm13 = BranchOffset(instr); + if (imm13 == kEndOfJumpChain) { + // EndOfChain sentinel is returned directly, not relative to pc or pos. + return kEndOfChain; + } else { + return pos + imm13; + } + } + case JAL: { + int32_t imm21 = JumpOffset(instr); + if (imm21 == kEndOfJumpChain) { + // EndOfChain sentinel is returned directly, not relative to pc or pos. + return kEndOfChain; + } else { + return pos + imm21; + } + } + case JALR: { + int32_t imm12 = instr >> 20; + if (imm12 == kEndOfJumpChain) { + // EndOfChain sentinel is returned directly, not relative to pc or pos. + return kEndOfChain; + } else { + return pos + imm12; + } + } + case LUI: { + Address pc = reinterpret_cast
(buffer_start_ + pos); + pc = target_address_at(pc); + uint64_t instr_address = reinterpret_cast(buffer_start_ + pos); + uint64_t imm = reinterpret_cast(pc); + if (imm == kEndOfJumpChain) { + return kEndOfChain; + } else { + DCHECK(instr_address - imm < INT_MAX); + int32_t delta = static_cast(instr_address - imm); + DCHECK(pos > delta); + return pos - delta; + } + } + case AUIPC: { + Instr instr_auipc = instr; + Instr instr_I = instr_at(pos + 4); + DCHECK(IsJalr(instr_I) || IsAddi(instr_I)); + int32_t offset = BrachlongOffset(instr_auipc, instr_I); + if (offset == kEndOfJumpChain) return kEndOfChain; + return offset + pos; + } + case RO_C_J: { + int32_t offset = instruction->RvcImm11CJValue(); + if (offset == kEndOfJumpChain) return kEndOfChain; + return offset + pos; + } + case RO_C_BNEZ: + case RO_C_BEQZ: { + int32_t offset = instruction->RvcImm8BValue(); + if (offset == kEndOfJumpChain) return kEndOfChain; + return pos + offset; + } + default: { + if (instr == kEndOfJumpChain) { + return kEndOfChain; + } else { + int32_t imm18 = + ((instr & static_cast(kImm16Mask)) << 16) >> 14; + return (imm18 + pos); + } + } + } +} + +static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos, + Instr instr) { + int32_t imm = target_pos - pos; + DCHECK_EQ(imm & 1, 0); + DCHECK(is_intn(imm, Assembler::kBranchOffsetBits)); + + instr &= ~kBImm12Mask; + int32_t imm12 = ((imm & 0x800) >> 4) | // bit 11 + ((imm & 0x1e) << 7) | // bits 4-1 + ((imm & 0x7e0) << 20) | // bits 10-5 + ((imm & 0x1000) << 19); // bit 12 + + return instr | (imm12 & kBImm12Mask); +} + +static inline Instr SetLdOffset(int32_t offset, Instr instr) { + DCHECK(Assembler::IsLd(instr)); + DCHECK(is_int12(offset)); + instr &= ~kImm12Mask; + int32_t imm12 = offset << kImm12Shift; + return instr | (imm12 & kImm12Mask); +} + +static inline Instr SetAuipcOffset(int32_t offset, Instr instr) { + DCHECK(Assembler::IsAuipc(instr)); + DCHECK(is_int20(offset)); + instr = (instr & ~kImm31_12Mask) | ((offset & kImm19_0Mask) << 12); + return instr; +} + +static inline Instr SetJalrOffset(int32_t offset, Instr instr) { + DCHECK(Assembler::IsJalr(instr)); + DCHECK(is_int12(offset)); + instr &= ~kImm12Mask; + int32_t imm12 = offset << kImm12Shift; + DCHECK(Assembler::IsJalr(instr | (imm12 & kImm12Mask))); + DCHECK_EQ(Assembler::JalrOffset(instr | (imm12 & kImm12Mask)), offset); + return instr | (imm12 & kImm12Mask); +} + +static inline Instr SetJalOffset(int32_t pos, int32_t target_pos, Instr instr) { + DCHECK(Assembler::IsJal(instr)); + int32_t imm = target_pos - pos; + DCHECK_EQ(imm & 1, 0); + DCHECK(is_intn(imm, Assembler::kJumpOffsetBits)); + + instr &= ~kImm20Mask; + int32_t imm20 = (imm & 0xff000) | // bits 19-12 + ((imm & 0x800) << 9) | // bit 11 + ((imm & 0x7fe) << 20) | // bits 10-1 + ((imm & 0x100000) << 11); // bit 20 + + return instr | (imm20 & kImm20Mask); +} + +static inline ShortInstr SetCJalOffset(int32_t pos, int32_t target_pos, + Instr instr) { + DCHECK(Assembler::IsCJal(instr)); + int32_t imm = target_pos - pos; + DCHECK_EQ(imm & 1, 0); + DCHECK(is_intn(imm, Assembler::kCJalOffsetBits)); + instr &= ~kImm11Mask; + int16_t imm11 = ((imm & 0x800) >> 1) | ((imm & 0x400) >> 4) | + ((imm & 0x300) >> 1) | ((imm & 0x80) >> 3) | + ((imm & 0x40) >> 1) | ((imm & 0x20) >> 5) | + ((imm & 0x10) << 5) | (imm & 0xe); + imm11 = imm11 << kImm11Shift; + DCHECK(Assembler::IsCJal(instr | (imm11 & kImm11Mask))); + return instr | (imm11 & kImm11Mask); +} +static inline Instr SetCBranchOffset(int32_t pos, int32_t target_pos, + Instr instr) { + 
DCHECK(Assembler::IsCBranch(instr)); + int32_t imm = target_pos - pos; + DCHECK_EQ(imm & 1, 0); + DCHECK(is_intn(imm, Assembler::kCBranchOffsetBits)); + + instr &= ~kRvcBImm8Mask; + int32_t imm8 = ((imm & 0x20) >> 5) | ((imm & 0x6)) | ((imm & 0xc0) >> 3) | + ((imm & 0x18) << 2) | ((imm & 0x100) >> 1); + imm8 = ((imm8 & 0x1f) << 2) | ((imm8 & 0xe0) << 5); + DCHECK(Assembler::IsCBranch(instr | imm8 & kRvcBImm8Mask)); + + return instr | (imm8 & kRvcBImm8Mask); +} + +void Assembler::target_at_put(int pos, int target_pos, bool is_internal, + bool trampoline) { + if (is_internal) { + uint64_t imm = reinterpret_cast(buffer_start_) + target_pos; + *reinterpret_cast(buffer_start_ + pos) = imm; + return; + } + DEBUG_PRINTF("target_at_put: %p (%d) to %p (%d)\n", + reinterpret_cast(buffer_start_ + pos), pos, + reinterpret_cast(buffer_start_ + target_pos), + target_pos); + Instruction* instruction = Instruction::At(buffer_start_ + pos); + Instr instr = instruction->InstructionBits(); + + switch (instruction->InstructionOpcodeType()) { + case BRANCH: { + instr = SetBranchOffset(pos, target_pos, instr); + instr_at_put(pos, instr); + } break; + case JAL: { + DCHECK(IsJal(instr)); + instr = SetJalOffset(pos, target_pos, instr); + instr_at_put(pos, instr); + } break; + case LUI: { + Address pc = reinterpret_cast
(buffer_start_ + pos); + set_target_value_at( + pc, reinterpret_cast(buffer_start_ + target_pos)); + } break; + case AUIPC: { + Instr instr_auipc = instr; + Instr instr_I = instr_at(pos + 4); + DCHECK(IsJalr(instr_I) || IsAddi(instr_I)); + + int64_t offset = target_pos - pos; + if (is_int21(offset) && IsJalr(instr_I) && trampoline) { + DCHECK(is_int21(offset) && ((offset & 1) == 0)); + Instr instr = JAL; + instr = SetJalOffset(pos, target_pos, instr); + DCHECK(IsJal(instr)); + DCHECK(JumpOffset(instr) == offset); + instr_at_put(pos, instr); + instr_at_put(pos + 4, kNopByte); + } else { + CHECK(is_int32(offset + 0x800)); + + int32_t Hi20 = (((int32_t)offset + 0x800) >> 12); + int32_t Lo12 = (int32_t)offset << 20 >> 20; + + instr_auipc = + (instr_auipc & ~kImm31_12Mask) | ((Hi20 & kImm19_0Mask) << 12); + instr_at_put(pos, instr_auipc); + + const int kImm31_20Mask = ((1 << 12) - 1) << 20; + const int kImm11_0Mask = ((1 << 12) - 1); + instr_I = (instr_I & ~kImm31_20Mask) | ((Lo12 & kImm11_0Mask) << 20); + instr_at_put(pos + 4, instr_I); + } + } break; + case RO_C_J: { + ShortInstr short_instr = SetCJalOffset(pos, target_pos, instr); + instr_at_put(pos, short_instr); + } break; + case RO_C_BNEZ: + case RO_C_BEQZ: { + instr = SetCBranchOffset(pos, target_pos, instr); + instr_at_put(pos, instr); + } break; + default: { + // Emitted label constant, not part of a branch. + // Make label relative to Code pointer of generated Code object. + instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); + } break; + } + disassembleInstr(instr); +} + +void Assembler::print(const Label* L) { + if (L->is_unused()) { + PrintF("unused label\n"); + } else if (L->is_bound()) { + PrintF("bound label to %d\n", L->pos()); + } else if (L->is_linked()) { + Label l; + l.link_to(L->pos()); + PrintF("unbound label"); + while (l.is_linked()) { + PrintF("@ %d ", l.pos()); + Instr instr = instr_at(l.pos()); + if ((instr & ~kImm16Mask) == 0) { + PrintF("value\n"); + } else { + PrintF("%d\n", instr); + } + next(&l, is_internal_reference(&l)); + } + } else { + PrintF("label in inconsistent state (pos = %d)\n", L->pos_); + } +} + +void Assembler::bind_to(Label* L, int pos) { + DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position. + DEBUG_PRINTF("binding %d to label %p\n", pos, L); + int trampoline_pos = kInvalidSlotPos; + bool is_internal = false; + if (L->is_linked() && !trampoline_emitted_) { + unbound_labels_count_--; + if (!is_internal_reference(L)) { + next_buffer_check_ += kTrampolineSlotsSize; + } + } + + while (L->is_linked()) { + int fixup_pos = L->pos(); + int dist = pos - fixup_pos; + is_internal = is_internal_reference(L); + next(L, is_internal); // Call next before overwriting link with target + // at fixup_pos. 
+ Instr instr = instr_at(fixup_pos); + DEBUG_PRINTF("\tfixup: %d to %d\n", fixup_pos, dist); + if (is_internal) { + target_at_put(fixup_pos, pos, is_internal); + } else { + if (IsBranch(instr)) { + if (dist > kMaxBranchOffset) { + if (trampoline_pos == kInvalidSlotPos) { + trampoline_pos = get_trampoline_entry(fixup_pos); + CHECK_NE(trampoline_pos, kInvalidSlotPos); + } + CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset); + DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos); + target_at_put(fixup_pos, trampoline_pos, false, true); + fixup_pos = trampoline_pos; + } + target_at_put(fixup_pos, pos, false); + } else if (IsJal(instr)) { + if (dist > kMaxJumpOffset) { + if (trampoline_pos == kInvalidSlotPos) { + trampoline_pos = get_trampoline_entry(fixup_pos); + CHECK_NE(trampoline_pos, kInvalidSlotPos); + } + CHECK((trampoline_pos - fixup_pos) <= kMaxJumpOffset); + DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos); + target_at_put(fixup_pos, trampoline_pos, false, true); + fixup_pos = trampoline_pos; + } + target_at_put(fixup_pos, pos, false); + } else { + target_at_put(fixup_pos, pos, false); + } + } + } + L->bind_to(pos); + + // Keep track of the last bound label so we don't eliminate any instructions + // before a bound label. + if (pos > last_bound_pos_) last_bound_pos_ = pos; +} + +void Assembler::bind(Label* L) { + DCHECK(!L->is_bound()); // Label can only be bound once. + bind_to(L, pc_offset()); +} + +void Assembler::next(Label* L, bool is_internal) { + DCHECK(L->is_linked()); + int link = target_at(L->pos(), is_internal); + if (link == kEndOfChain) { + L->Unuse(); + } else { + DCHECK_GE(link, 0); + DEBUG_PRINTF("next: %p to %p (%d)\n", L, + reinterpret_cast(buffer_start_ + link), link); + L->link_to(link); + } +} + +bool Assembler::is_near(Label* L) { + DCHECK(L->is_bound()); + return is_intn((pc_offset() - L->pos()), kJumpOffsetBits); +} + +bool Assembler::is_near(Label* L, OffsetSize bits) { + if (L == nullptr || !L->is_bound()) return true; + return is_intn((pc_offset() - L->pos()), bits); +} + +bool Assembler::is_near_branch(Label* L) { + DCHECK(L->is_bound()); + return is_intn((pc_offset() - L->pos()), kBranchOffsetBits); +} + +int Assembler::BranchOffset(Instr instr) { + // | imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode | + // 31 25 11 7 + int32_t imm13 = ((instr & 0xf00) >> 7) | ((instr & 0x7e000000) >> 20) | + ((instr & 0x80) << 4) | ((instr & 0x80000000) >> 19); + imm13 = imm13 << 19 >> 19; + return imm13; +} + +int Assembler::JumpOffset(Instr instr) { + int32_t imm21 = ((instr & 0x7fe00000) >> 20) | ((instr & 0x100000) >> 9) | + (instr & 0xff000) | ((instr & 0x80000000) >> 11); + imm21 = imm21 << 11 >> 11; + return imm21; +} + +int Assembler::CJumpOffset(Instr instr) { + int32_t imm12 = ((instr & 0x4) << 3) | ((instr & 0x38) >> 2) | + ((instr & 0x40) << 1) | ((instr & 0x80) >> 1) | + ((instr & 0x100) << 2) | ((instr & 0x600) >> 1) | + ((instr & 0x800) >> 7) | ((instr & 0x1000) >> 1); + imm12 = imm12 << 20 >> 20; + return imm12; +} + +int Assembler::BrachlongOffset(Instr auipc, Instr instr_I) { + DCHECK(reinterpret_cast(&instr_I)->InstructionType() == + InstructionBase::kIType); + DCHECK(IsAuipc(auipc)); + DCHECK_EQ((auipc & kRdFieldMask) >> kRdShift, + (instr_I & kRs1FieldMask) >> kRs1Shift); + int32_t imm_auipc = AuipcOffset(auipc); + int32_t imm12 = static_cast(instr_I & kImm12Mask) >> 20; + int32_t offset = imm12 + imm_auipc; + return offset; +} + +int Assembler::PatchBranchlongOffset(Address pc, Instr instr_auipc, + Instr instr_jalr, 
int32_t offset) { + DCHECK(IsAuipc(instr_auipc)); + DCHECK(IsJalr(instr_jalr)); + CHECK(is_int32(offset + 0x800)); + int32_t Hi20 = (((int32_t)offset + 0x800) >> 12); + int32_t Lo12 = (int32_t)offset << 20 >> 20; + instr_at_put(pc, SetAuipcOffset(Hi20, instr_auipc)); + instr_at_put(pc + 4, SetJalrOffset(Lo12, instr_jalr)); + DCHECK(offset == + BrachlongOffset(Assembler::instr_at(pc), Assembler::instr_at(pc + 4))); + return 2; +} + +int Assembler::LdOffset(Instr instr) { + DCHECK(IsLd(instr)); + int32_t imm12 = static_cast(instr & kImm12Mask) >> 20; + return imm12; +} + +int Assembler::JalrOffset(Instr instr) { + DCHECK(IsJalr(instr)); + int32_t imm12 = static_cast(instr & kImm12Mask) >> 20; + return imm12; +} + +int Assembler::AuipcOffset(Instr instr) { + DCHECK(IsAuipc(instr)); + int32_t imm20 = static_cast(instr & kImm20Mask); + return imm20; +} +// We have to use a temporary register for things that can be relocated even +// if they can be encoded in RISC-V's 12 bits of immediate-offset instruction +// space. There is no guarantee that the relocated location can be similarly +// encoded. +bool Assembler::MustUseReg(RelocInfo::Mode rmode) { + return !RelocInfo::IsNoInfo(rmode); +} + +void Assembler::disassembleInstr(Instr instr) { + if (!FLAG_riscv_debug) return; + disasm::NameConverter converter; + disasm::Disassembler disasm(converter); + base::EmbeddedVector disasm_buffer; + + disasm.InstructionDecode(disasm_buffer, reinterpret_cast(&instr)); + DEBUG_PRINTF("%s\n", disasm_buffer.begin()); +} + +// ----- Top-level instruction formats match those in the ISA manual +// (R, I, S, B, U, J). These match the formats defined in the compiler +void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, + Register rd, Register rs1, Register rs2) { + DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() && + rs1.is_valid() && rs2.is_valid()); + Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) | + (funct7 << kFunct7Shift); + emit(instr); +} + +void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, + FPURegister rd, FPURegister rs1, FPURegister rs2) { + DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() && + rs1.is_valid() && rs2.is_valid()); + Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) | + (funct7 << kFunct7Shift); + emit(instr); +} + +void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, + Register rd, FPURegister rs1, Register rs2) { + DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() && + rs1.is_valid() && rs2.is_valid()); + Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) | + (funct7 << kFunct7Shift); + emit(instr); +} + +void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, + FPURegister rd, Register rs1, Register rs2) { + DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() && + rs1.is_valid() && rs2.is_valid()); + Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) | + (funct7 << kFunct7Shift); + emit(instr); +} + +void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, + FPURegister rd, FPURegister rs1, Register rs2) { + DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() && + rs1.is_valid() && rs2.is_valid()); + Instr instr = 
opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) | + (funct7 << kFunct7Shift); + emit(instr); +} + +void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, + Register rd, FPURegister rs1, FPURegister rs2) { + DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() && + rs1.is_valid() && rs2.is_valid()); + Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) | + (funct7 << kFunct7Shift); + emit(instr); +} + +void Assembler::GenInstrR4(uint8_t funct2, Opcode opcode, Register rd, + Register rs1, Register rs2, Register rs3, + RoundingMode frm) { + DCHECK(is_uint2(funct2) && rd.is_valid() && rs1.is_valid() && + rs2.is_valid() && rs3.is_valid() && is_uint3(frm)); + Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) | + (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift); + emit(instr); +} + +void Assembler::GenInstrR4(uint8_t funct2, Opcode opcode, FPURegister rd, + FPURegister rs1, FPURegister rs2, FPURegister rs3, + RoundingMode frm) { + DCHECK(is_uint2(funct2) && rd.is_valid() && rs1.is_valid() && + rs2.is_valid() && rs3.is_valid() && is_uint3(frm)); + Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) | + (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift); + emit(instr); +} + +void Assembler::GenInstrRAtomic(uint8_t funct5, bool aq, bool rl, + uint8_t funct3, Register rd, Register rs1, + Register rs2) { + DCHECK(is_uint5(funct5) && is_uint3(funct3) && rd.is_valid() && + rs1.is_valid() && rs2.is_valid()); + Instr instr = AMO | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) | + (rl << kRlShift) | (aq << kAqShift) | (funct5 << kFunct5Shift); + emit(instr); +} + +void Assembler::GenInstrRFrm(uint8_t funct7, Opcode opcode, Register rd, + Register rs1, Register rs2, RoundingMode frm) { + DCHECK(rd.is_valid() && rs1.is_valid() && rs2.is_valid() && is_uint3(frm)); + Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) | + (funct7 << kFunct7Shift); + emit(instr); +} + +void Assembler::GenInstrI(uint8_t funct3, Opcode opcode, Register rd, + Register rs1, int16_t imm12) { + DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() && + (is_uint12(imm12) || is_int12(imm12))); + Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift); + emit(instr); +} + +void Assembler::GenInstrI(uint8_t funct3, Opcode opcode, FPURegister rd, + Register rs1, int16_t imm12) { + DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() && + (is_uint12(imm12) || is_int12(imm12))); + Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift); + emit(instr); +} + +void Assembler::GenInstrIShift(bool arithshift, uint8_t funct3, Opcode opcode, + Register rd, Register rs1, uint8_t shamt) { + DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() && + is_uint6(shamt)); + Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (shamt << kShamtShift) | + (arithshift << kArithShiftShift); + emit(instr); +} + +void Assembler::GenInstrIShiftW(bool arithshift, uint8_t funct3, 
Opcode opcode, + Register rd, Register rs1, uint8_t shamt) { + DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() && + is_uint5(shamt)); + Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) | + (rs1.code() << kRs1Shift) | (shamt << kShamtWShift) | + (arithshift << kArithShiftShift); + emit(instr); +} + +void Assembler::GenInstrS(uint8_t funct3, Opcode opcode, Register rs1, + Register rs2, int16_t imm12) { + DCHECK(is_uint3(funct3) && rs1.is_valid() && rs2.is_valid() && + is_int12(imm12)); + Instr instr = opcode | ((imm12 & 0x1f) << 7) | // bits 4-0 + (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) | + (rs2.code() << kRs2Shift) | + ((imm12 & 0xfe0) << 20); // bits 11-5 + emit(instr); +} + +void Assembler::GenInstrS(uint8_t funct3, Opcode opcode, Register rs1, + FPURegister rs2, int16_t imm12) { + DCHECK(is_uint3(funct3) && rs1.is_valid() && rs2.is_valid() && + is_int12(imm12)); + Instr instr = opcode | ((imm12 & 0x1f) << 7) | // bits 4-0 + (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) | + (rs2.code() << kRs2Shift) | + ((imm12 & 0xfe0) << 20); // bits 11-5 + emit(instr); +} + +void Assembler::GenInstrB(uint8_t funct3, Opcode opcode, Register rs1, + Register rs2, int16_t imm13) { + DCHECK(is_uint3(funct3) && rs1.is_valid() && rs2.is_valid() && + is_int13(imm13) && ((imm13 & 1) == 0)); + Instr instr = opcode | ((imm13 & 0x800) >> 4) | // bit 11 + ((imm13 & 0x1e) << 7) | // bits 4-1 + (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) | + (rs2.code() << kRs2Shift) | + ((imm13 & 0x7e0) << 20) | // bits 10-5 + ((imm13 & 0x1000) << 19); // bit 12 + emit(instr); +} + +void Assembler::GenInstrU(Opcode opcode, Register rd, int32_t imm20) { + DCHECK(rd.is_valid() && (is_int20(imm20) || is_uint20(imm20))); + Instr instr = opcode | (rd.code() << kRdShift) | (imm20 << kImm20Shift); + emit(instr); +} + +void Assembler::GenInstrJ(Opcode opcode, Register rd, int32_t imm21) { + DCHECK(rd.is_valid() && is_int21(imm21) && ((imm21 & 1) == 0)); + Instr instr = opcode | (rd.code() << kRdShift) | + (imm21 & 0xff000) | // bits 19-12 + ((imm21 & 0x800) << 9) | // bit 11 + ((imm21 & 0x7fe) << 20) | // bits 10-1 + ((imm21 & 0x100000) << 11); // bit 20 + emit(instr); +} + +void Assembler::GenInstrCR(uint8_t funct4, Opcode opcode, Register rd, + Register rs2) { + DCHECK(is_uint4(funct4) && rd.is_valid() && rs2.is_valid()); + ShortInstr instr = opcode | (rs2.code() << kRvcRs2Shift) | + (rd.code() << kRvcRdShift) | (funct4 << kRvcFunct4Shift); + emit(instr); +} + +void Assembler::GenInstrCA(uint8_t funct6, Opcode opcode, Register rd, + uint8_t funct, Register rs2) { + DCHECK(is_uint6(funct6) && rd.is_valid() && rs2.is_valid() && + is_uint2(funct)); + ShortInstr instr = opcode | ((rs2.code() & 0x7) << kRvcRs2sShift) | + ((rd.code() & 0x7) << kRvcRs1sShift) | + (funct6 << kRvcFunct6Shift) | (funct << kRvcFunct2Shift); + emit(instr); +} + +void Assembler::GenInstrCI(uint8_t funct3, Opcode opcode, Register rd, + int8_t imm6) { + DCHECK(is_uint3(funct3) && rd.is_valid() && is_int6(imm6)); + ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) | + (rd.code() << kRvcRdShift) | ((imm6 & 0x20) << 7) | + (funct3 << kRvcFunct3Shift); + emit(instr); +} + +void Assembler::GenInstrCIU(uint8_t funct3, Opcode opcode, Register rd, + uint8_t uimm6) { + DCHECK(is_uint3(funct3) && rd.is_valid() && is_uint6(uimm6)); + ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) | + (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) | + (funct3 << kRvcFunct3Shift); + emit(instr); +} + +void 
Assembler::GenInstrCIU(uint8_t funct3, Opcode opcode, FPURegister rd, + uint8_t uimm6) { + DCHECK(is_uint3(funct3) && rd.is_valid() && is_uint6(uimm6)); + ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) | + (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) | + (funct3 << kRvcFunct3Shift); + emit(instr); +} + +void Assembler::GenInstrCIW(uint8_t funct3, Opcode opcode, Register rd, + uint8_t uimm8) { + DCHECK(is_uint3(funct3) && rd.is_valid() && is_uint8(uimm8)); + ShortInstr instr = opcode | ((uimm8) << 5) | + ((rd.code() & 0x7) << kRvcRs2sShift) | + (funct3 << kRvcFunct3Shift); + emit(instr); +} + +void Assembler::GenInstrCSS(uint8_t funct3, Opcode opcode, Register rs2, + uint8_t uimm6) { + DCHECK(is_uint3(funct3) && rs2.is_valid() && is_uint6(uimm6)); + ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) | + (funct3 << kRvcFunct3Shift); + emit(instr); +} + +void Assembler::GenInstrCSS(uint8_t funct3, Opcode opcode, FPURegister rs2, + uint8_t uimm6) { + DCHECK(is_uint3(funct3) && rs2.is_valid() && is_uint6(uimm6)); + ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) | + (funct3 << kRvcFunct3Shift); + emit(instr); +} + +void Assembler::GenInstrCL(uint8_t funct3, Opcode opcode, Register rd, + Register rs1, uint8_t uimm5) { + DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() && + is_uint5(uimm5)); + ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) | + ((rd.code() & 0x7) << kRvcRs2sShift) | + ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) | + ((rs1.code() & 0x7) << kRvcRs1sShift); + emit(instr); +} + +void Assembler::GenInstrCL(uint8_t funct3, Opcode opcode, FPURegister rd, + Register rs1, uint8_t uimm5) { + DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() && + is_uint5(uimm5)); + ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) | + ((rd.code() & 0x7) << kRvcRs2sShift) | + ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) | + ((rs1.code() & 0x7) << kRvcRs1sShift); + emit(instr); +} +void Assembler::GenInstrCJ(uint8_t funct3, Opcode opcode, uint16_t uint11) { + DCHECK(is_uint11(uint11)); + ShortInstr instr = opcode | (funct3 << kRvcFunct3Shift) | (uint11 << 2); + emit(instr); +} + +void Assembler::GenInstrCS(uint8_t funct3, Opcode opcode, Register rs2, + Register rs1, uint8_t uimm5) { + DCHECK(is_uint3(funct3) && rs2.is_valid() && rs1.is_valid() && + is_uint5(uimm5)); + ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) | + ((rs2.code() & 0x7) << kRvcRs2sShift) | + ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) | + ((rs1.code() & 0x7) << kRvcRs1sShift); + emit(instr); +} + +void Assembler::GenInstrCS(uint8_t funct3, Opcode opcode, FPURegister rs2, + Register rs1, uint8_t uimm5) { + DCHECK(is_uint3(funct3) && rs2.is_valid() && rs1.is_valid() && + is_uint5(uimm5)); + ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) | + ((rs2.code() & 0x7) << kRvcRs2sShift) | + ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) | + ((rs1.code() & 0x7) << kRvcRs1sShift); + emit(instr); +} + +void Assembler::GenInstrCB(uint8_t funct3, Opcode opcode, Register rs1, + uint8_t uimm8) { + DCHECK(is_uint3(funct3) && is_uint8(uimm8)); + ShortInstr instr = opcode | ((uimm8 & 0x1f) << 2) | ((uimm8 & 0xe0) << 5) | + ((rs1.code() & 0x7) << kRvcRs1sShift) | + (funct3 << kRvcFunct3Shift); + emit(instr); +} + +void Assembler::GenInstrCBA(uint8_t funct3, uint8_t funct2, Opcode opcode, + Register rs1, int8_t imm6) { + DCHECK(is_uint3(funct3) && is_uint2(funct2) && is_int6(imm6)); + ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) | ((imm6 & 0x20) << 7) | + 
((rs1.code() & 0x7) << kRvcRs1sShift) | + (funct3 << kRvcFunct3Shift) | (funct2 << 10); + emit(instr); +} + +// OPIVV OPFVV OPMVV +void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, + VRegister vs1, VRegister vs2, MaskType mask) { + DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV); + Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) | + ((vd.code() & 0x1F) << kRvvVdShift) | + ((vs1.code() & 0x1F) << kRvvVs1Shift) | + ((vs2.code() & 0x1F) << kRvvVs2Shift); + emit(instr); +} + +void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, + int8_t vs1, VRegister vs2, MaskType mask) { + DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV); + Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) | + ((vd.code() & 0x1F) << kRvvVdShift) | + ((vs1 & 0x1F) << kRvvVs1Shift) | + ((vs2.code() & 0x1F) << kRvvVs2Shift); + emit(instr); +} +// OPMVV OPFVV +void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, Register rd, + VRegister vs1, VRegister vs2, MaskType mask) { + DCHECK(opcode == OP_MVV || opcode == OP_FVV); + Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) | + ((rd.code() & 0x1F) << kRvvVdShift) | + ((vs1.code() & 0x1F) << kRvvVs1Shift) | + ((vs2.code() & 0x1F) << kRvvVs2Shift); + emit(instr); +} + +// OPFVV +void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, FPURegister fd, + VRegister vs1, VRegister vs2, MaskType mask) { + DCHECK(opcode == OP_FVV); + Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) | + ((fd.code() & 0x1F) << kRvvVdShift) | + ((vs1.code() & 0x1F) << kRvvVs1Shift) | + ((vs2.code() & 0x1F) << kRvvVs2Shift); + emit(instr); +} + +// OPIVX OPMVX +void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, + Register rs1, VRegister vs2, MaskType mask) { + DCHECK(opcode == OP_IVX || opcode == OP_MVX); + Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) | + ((vd.code() & 0x1F) << kRvvVdShift) | + ((rs1.code() & 0x1F) << kRvvRs1Shift) | + ((vs2.code() & 0x1F) << kRvvVs2Shift); + emit(instr); +} + +// OPFVF +void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, + FPURegister fs1, VRegister vs2, MaskType mask) { + DCHECK(opcode == OP_FVF); + Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) | + ((vd.code() & 0x1F) << kRvvVdShift) | + ((fs1.code() & 0x1F) << kRvvRs1Shift) | + ((vs2.code() & 0x1F) << kRvvVs2Shift); + emit(instr); +} + +// OPMVX +void Assembler::GenInstrV(uint8_t funct6, Register rd, Register rs1, + VRegister vs2, MaskType mask) { + Instr instr = (funct6 << kRvvFunct6Shift) | OP_MVX | (mask << kRvvVmShift) | + ((rd.code() & 0x1F) << kRvvVdShift) | + ((rs1.code() & 0x1F) << kRvvRs1Shift) | + ((vs2.code() & 0x1F) << kRvvVs2Shift); + emit(instr); +} +// OPIVI +void Assembler::GenInstrV(uint8_t funct6, VRegister vd, int8_t imm5, + VRegister vs2, MaskType mask) { + DCHECK(is_uint5(imm5) || is_int5(imm5)); + Instr instr = (funct6 << kRvvFunct6Shift) | OP_IVI | (mask << kRvvVmShift) | + ((vd.code() & 0x1F) << kRvvVdShift) | + (((uint32_t)imm5 << kRvvImm5Shift) & kRvvImm5Mask) | + ((vs2.code() & 0x1F) << kRvvVs2Shift); + emit(instr); +} + +// VL VS +void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd, + Register rs1, uint8_t umop, MaskType mask, + uint8_t IsMop, bool IsMew, uint8_t Nf) { + DCHECK(opcode == LOAD_FP || opcode == STORE_FP); + Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) | + ((width << 
kRvvWidthShift) & kRvvWidthMask) | + ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) | + ((umop << kRvvRs2Shift) & kRvvRs2Mask) | + ((mask << kRvvVmShift) & kRvvVmMask) | + ((IsMop << kRvvMopShift) & kRvvMopMask) | + ((IsMew << kRvvMewShift) & kRvvMewMask) | + ((Nf << kRvvNfShift) & kRvvNfMask); + emit(instr); +} +void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd, + Register rs1, Register rs2, MaskType mask, + uint8_t IsMop, bool IsMew, uint8_t Nf) { + DCHECK(opcode == LOAD_FP || opcode == STORE_FP); + Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) | + ((width << kRvvWidthShift) & kRvvWidthMask) | + ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) | + ((rs2.code() << kRvvRs2Shift) & kRvvRs2Mask) | + ((mask << kRvvVmShift) & kRvvVmMask) | + ((IsMop << kRvvMopShift) & kRvvMopMask) | + ((IsMew << kRvvMewShift) & kRvvMewMask) | + ((Nf << kRvvNfShift) & kRvvNfMask); + emit(instr); +} +// VL VS AMO +void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd, + Register rs1, VRegister vs2, MaskType mask, + uint8_t IsMop, bool IsMew, uint8_t Nf) { + DCHECK(opcode == LOAD_FP || opcode == STORE_FP || opcode == AMO); + Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) | + ((width << kRvvWidthShift) & kRvvWidthMask) | + ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) | + ((vs2.code() << kRvvRs2Shift) & kRvvRs2Mask) | + ((mask << kRvvVmShift) & kRvvVmMask) | + ((IsMop << kRvvMopShift) & kRvvMopMask) | + ((IsMew << kRvvMewShift) & kRvvMewMask) | + ((Nf << kRvvNfShift) & kRvvNfMask); + emit(instr); +} +// vmv_xs vcpop_m vfirst_m +void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, Register rd, + uint8_t vs1, VRegister vs2, MaskType mask) { + DCHECK(opcode == OP_MVV); + Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) | + ((rd.code() & 0x1F) << kRvvVdShift) | + ((vs1 & 0x1F) << kRvvVs1Shift) | + ((vs2.code() & 0x1F) << kRvvVs2Shift); + emit(instr); +} +// ----- Instruction class templates match those in the compiler + +void Assembler::GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2, + int16_t imm13) { + GenInstrB(funct3, BRANCH, rs1, rs2, imm13); +} + +void Assembler::GenInstrLoad_ri(uint8_t funct3, Register rd, Register rs1, + int16_t imm12) { + GenInstrI(funct3, LOAD, rd, rs1, imm12); +} + +void Assembler::GenInstrStore_rri(uint8_t funct3, Register rs1, Register rs2, + int16_t imm12) { + GenInstrS(funct3, STORE, rs1, rs2, imm12); +} + +void Assembler::GenInstrALU_ri(uint8_t funct3, Register rd, Register rs1, + int16_t imm12) { + GenInstrI(funct3, OP_IMM, rd, rs1, imm12); +} + +void Assembler::GenInstrShift_ri(bool arithshift, uint8_t funct3, Register rd, + Register rs1, uint8_t shamt) { + DCHECK(is_uint6(shamt)); + GenInstrI(funct3, OP_IMM, rd, rs1, (arithshift << 10) | shamt); +} + +void Assembler::GenInstrALU_rr(uint8_t funct7, uint8_t funct3, Register rd, + Register rs1, Register rs2) { + GenInstrR(funct7, funct3, OP, rd, rs1, rs2); +} + +void Assembler::GenInstrCSR_ir(uint8_t funct3, Register rd, + ControlStatusReg csr, Register rs1) { + GenInstrI(funct3, SYSTEM, rd, rs1, csr); +} + +void Assembler::GenInstrCSR_ii(uint8_t funct3, Register rd, + ControlStatusReg csr, uint8_t imm5) { + GenInstrI(funct3, SYSTEM, rd, ToRegister(imm5), csr); +} + +void Assembler::GenInstrShiftW_ri(bool arithshift, uint8_t funct3, Register rd, + Register rs1, uint8_t shamt) { + GenInstrIShiftW(arithshift, funct3, OP_IMM_32, rd, rs1, shamt); +} + +void Assembler::GenInstrALUW_rr(uint8_t funct7, uint8_t funct3, Register rd, + 
Register rs1, Register rs2) { + GenInstrR(funct7, funct3, OP_32, rd, rs1, rs2); +} + +void Assembler::GenInstrPriv(uint8_t funct7, Register rs1, Register rs2) { + GenInstrR(funct7, 0b000, SYSTEM, ToRegister(0), rs1, rs2); +} + +void Assembler::GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd, Register rs1, + int16_t imm12) { + GenInstrI(funct3, LOAD_FP, rd, rs1, imm12); +} + +void Assembler::GenInstrStoreFP_rri(uint8_t funct3, Register rs1, + FPURegister rs2, int16_t imm12) { + GenInstrS(funct3, STORE_FP, rs1, rs2, imm12); +} + +void Assembler::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd, + FPURegister rs1, FPURegister rs2) { + GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2); +} + +void Assembler::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd, + Register rs1, Register rs2) { + GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2); +} + +void Assembler::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd, + FPURegister rs1, Register rs2) { + GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2); +} + +void Assembler::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd, + FPURegister rs1, Register rs2) { + GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2); +} + +void Assembler::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd, + FPURegister rs1, FPURegister rs2) { + GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2); +} + +// Returns the next free trampoline entry. +int32_t Assembler::get_trampoline_entry(int32_t pos) { + int32_t trampoline_entry = kInvalidSlotPos; + if (!internal_trampoline_exception_) { + DEBUG_PRINTF("\tstart: %d,pos: %d\n", trampoline_.start(), pos); + if (trampoline_.start() > pos) { + trampoline_entry = trampoline_.take_slot(); + } + + if (kInvalidSlotPos == trampoline_entry) { + internal_trampoline_exception_ = true; + } + } + return trampoline_entry; +} + +uint64_t Assembler::jump_address(Label* L) { + int64_t target_pos; + DEBUG_PRINTF("jump_address: %p to %p (%d)\n", L, + reinterpret_cast(buffer_start_ + pc_offset()), + pc_offset()); + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link. + L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + DEBUG_PRINTF("\tstarted link\n"); + return kEndOfJumpChain; + } + } + uint64_t imm = reinterpret_cast(buffer_start_) + target_pos; + if (FLAG_riscv_c_extension) + DCHECK_EQ(imm & 1, 0); + else + DCHECK_EQ(imm & 3, 0); + + return imm; +} + +uint64_t Assembler::branch_long_offset(Label* L) { + int64_t target_pos; + + DEBUG_PRINTF("branch_long_offset: %p to %p (%d)\n", L, + reinterpret_cast(buffer_start_ + pc_offset()), + pc_offset()); + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link. 
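+      // Note added for clarity (inferred from the surrounding code, not part
+      // of the original change): an unbound label chains its uses together;
+      // pos() yields the previously linked use, link_to() makes the current
+      // pc the new head of the chain, and kEndOfJumpChain is returned below
+      // when a fresh chain is started.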
+ L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + DEBUG_PRINTF("\tstarted link\n"); + return kEndOfJumpChain; + } + } + int64_t offset = target_pos - pc_offset(); + if (FLAG_riscv_c_extension) + DCHECK_EQ(offset & 1, 0); + else + DCHECK_EQ(offset & 3, 0); + + return static_cast(offset); +} + +int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) { + int32_t target_pos; + + DEBUG_PRINTF("branch_offset_helper: %p to %p (%d)\n", L, + reinterpret_cast(buffer_start_ + pc_offset()), + pc_offset()); + if (L->is_bound()) { + target_pos = L->pos(); + DEBUG_PRINTF("\tbound: %d", target_pos); + } else { + if (L->is_linked()) { + target_pos = L->pos(); + L->link_to(pc_offset()); + DEBUG_PRINTF("\tadded to link: %d\n", target_pos); + } else { + L->link_to(pc_offset()); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + DEBUG_PRINTF("\tstarted link\n"); + return kEndOfJumpChain; + } + } + + int32_t offset = target_pos - pc_offset(); + DCHECK(is_intn(offset, bits)); + DCHECK_EQ(offset & 1, 0); + DEBUG_PRINTF("\toffset = %d\n", offset); + return offset; +} + +void Assembler::label_at_put(Label* L, int at_offset) { + int target_pos; + DEBUG_PRINTF("label_at_put: %p @ %p (%d)\n", L, + reinterpret_cast(buffer_start_ + at_offset), at_offset); + if (L->is_bound()) { + target_pos = L->pos(); + instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link. + int32_t imm18 = target_pos - at_offset; + DCHECK_EQ(imm18 & 3, 0); + int32_t imm16 = imm18 >> 2; + DCHECK(is_int16(imm16)); + instr_at_put(at_offset, (int32_t)(imm16 & kImm16Mask)); + } else { + target_pos = kEndOfJumpChain; + instr_at_put(at_offset, target_pos); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + } + L->link_to(at_offset); + } +} + +//===----------------------------------------------------------------------===// +// Instructions +//===----------------------------------------------------------------------===// + +void Assembler::lui(Register rd, int32_t imm20) { GenInstrU(LUI, rd, imm20); } + +void Assembler::auipc(Register rd, int32_t imm20) { + GenInstrU(AUIPC, rd, imm20); +} + +// Jumps + +void Assembler::jal(Register rd, int32_t imm21) { + GenInstrJ(JAL, rd, imm21); + BlockTrampolinePoolFor(1); +} + +void Assembler::jalr(Register rd, Register rs1, int16_t imm12) { + GenInstrI(0b000, JALR, rd, rs1, imm12); + BlockTrampolinePoolFor(1); +} + +// Branches + +void Assembler::beq(Register rs1, Register rs2, int16_t imm13) { + GenInstrBranchCC_rri(0b000, rs1, rs2, imm13); +} + +void Assembler::bne(Register rs1, Register rs2, int16_t imm13) { + GenInstrBranchCC_rri(0b001, rs1, rs2, imm13); +} + +void Assembler::blt(Register rs1, Register rs2, int16_t imm13) { + GenInstrBranchCC_rri(0b100, rs1, rs2, imm13); +} + +void Assembler::bge(Register rs1, Register rs2, int16_t imm13) { + GenInstrBranchCC_rri(0b101, rs1, rs2, imm13); +} + +void Assembler::bltu(Register rs1, Register rs2, int16_t imm13) { + GenInstrBranchCC_rri(0b110, rs1, rs2, imm13); +} + +void Assembler::bgeu(Register rs1, Register rs2, int16_t imm13) { + GenInstrBranchCC_rri(0b111, rs1, rs2, imm13); +} + +// Loads + +void Assembler::lb(Register rd, Register rs1, int16_t imm12) { + GenInstrLoad_ri(0b000, rd, rs1, imm12); +} + +void Assembler::lh(Register 
rd, Register rs1, int16_t imm12) { + GenInstrLoad_ri(0b001, rd, rs1, imm12); +} + +void Assembler::lw(Register rd, Register rs1, int16_t imm12) { + GenInstrLoad_ri(0b010, rd, rs1, imm12); +} + +void Assembler::lbu(Register rd, Register rs1, int16_t imm12) { + GenInstrLoad_ri(0b100, rd, rs1, imm12); +} + +void Assembler::lhu(Register rd, Register rs1, int16_t imm12) { + GenInstrLoad_ri(0b101, rd, rs1, imm12); +} + +// Stores + +void Assembler::sb(Register source, Register base, int16_t imm12) { + GenInstrStore_rri(0b000, base, source, imm12); +} + +void Assembler::sh(Register source, Register base, int16_t imm12) { + GenInstrStore_rri(0b001, base, source, imm12); +} + +void Assembler::sw(Register source, Register base, int16_t imm12) { + GenInstrStore_rri(0b010, base, source, imm12); +} + +// Arithmetic with immediate + +void Assembler::addi(Register rd, Register rs1, int16_t imm12) { + GenInstrALU_ri(0b000, rd, rs1, imm12); +} + +void Assembler::slti(Register rd, Register rs1, int16_t imm12) { + GenInstrALU_ri(0b010, rd, rs1, imm12); +} + +void Assembler::sltiu(Register rd, Register rs1, int16_t imm12) { + GenInstrALU_ri(0b011, rd, rs1, imm12); +} + +void Assembler::xori(Register rd, Register rs1, int16_t imm12) { + GenInstrALU_ri(0b100, rd, rs1, imm12); +} + +void Assembler::ori(Register rd, Register rs1, int16_t imm12) { + GenInstrALU_ri(0b110, rd, rs1, imm12); +} + +void Assembler::andi(Register rd, Register rs1, int16_t imm12) { + GenInstrALU_ri(0b111, rd, rs1, imm12); +} + +void Assembler::slli(Register rd, Register rs1, uint8_t shamt) { + GenInstrShift_ri(0, 0b001, rd, rs1, shamt & 0x3f); +} + +void Assembler::srli(Register rd, Register rs1, uint8_t shamt) { + GenInstrShift_ri(0, 0b101, rd, rs1, shamt & 0x3f); +} + +void Assembler::srai(Register rd, Register rs1, uint8_t shamt) { + GenInstrShift_ri(1, 0b101, rd, rs1, shamt & 0x3f); +} + +// Arithmetic + +void Assembler::add(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000000, 0b000, rd, rs1, rs2); +} + +void Assembler::sub(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0100000, 0b000, rd, rs1, rs2); +} + +void Assembler::sll(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000000, 0b001, rd, rs1, rs2); +} + +void Assembler::slt(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000000, 0b010, rd, rs1, rs2); +} + +void Assembler::sltu(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000000, 0b011, rd, rs1, rs2); +} + +void Assembler::xor_(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000000, 0b100, rd, rs1, rs2); +} + +void Assembler::srl(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000000, 0b101, rd, rs1, rs2); +} + +void Assembler::sra(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0100000, 0b101, rd, rs1, rs2); +} + +void Assembler::or_(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000000, 0b110, rd, rs1, rs2); +} + +void Assembler::and_(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000000, 0b111, rd, rs1, rs2); +} + +// Memory fences + +void Assembler::fence(uint8_t pred, uint8_t succ) { + DCHECK(is_uint4(pred) && is_uint4(succ)); + uint16_t imm12 = succ | (pred << 4) | (0b0000 << 8); + GenInstrI(0b000, MISC_MEM, ToRegister(0), ToRegister(0), imm12); +} + +void Assembler::fence_tso() { + uint16_t imm12 = (0b0011) | (0b0011 << 4) | (0b1000 << 8); + GenInstrI(0b000, MISC_MEM, ToRegister(0), ToRegister(0), imm12); +} + +// Environment call / break + +void Assembler::ecall() { + 
GenInstrI(0b000, SYSTEM, ToRegister(0), ToRegister(0), 0); +} + +void Assembler::ebreak() { + GenInstrI(0b000, SYSTEM, ToRegister(0), ToRegister(0), 1); +} + +// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented +// instruction (i.e., it should always trap, if your implementation has invalid +// instruction traps). +void Assembler::unimp() { + GenInstrI(0b001, SYSTEM, ToRegister(0), ToRegister(0), 0b110000000000); +} + +// CSR + +void Assembler::csrrw(Register rd, ControlStatusReg csr, Register rs1) { + GenInstrCSR_ir(0b001, rd, csr, rs1); +} + +void Assembler::csrrs(Register rd, ControlStatusReg csr, Register rs1) { + GenInstrCSR_ir(0b010, rd, csr, rs1); +} + +void Assembler::csrrc(Register rd, ControlStatusReg csr, Register rs1) { + GenInstrCSR_ir(0b011, rd, csr, rs1); +} + +void Assembler::csrrwi(Register rd, ControlStatusReg csr, uint8_t imm5) { + GenInstrCSR_ii(0b101, rd, csr, imm5); +} + +void Assembler::csrrsi(Register rd, ControlStatusReg csr, uint8_t imm5) { + GenInstrCSR_ii(0b110, rd, csr, imm5); +} + +void Assembler::csrrci(Register rd, ControlStatusReg csr, uint8_t imm5) { + GenInstrCSR_ii(0b111, rd, csr, imm5); +} + +// RV64I + +void Assembler::lwu(Register rd, Register rs1, int16_t imm12) { + GenInstrLoad_ri(0b110, rd, rs1, imm12); +} + +void Assembler::ld(Register rd, Register rs1, int16_t imm12) { + GenInstrLoad_ri(0b011, rd, rs1, imm12); +} + +void Assembler::sd(Register source, Register base, int16_t imm12) { + GenInstrStore_rri(0b011, base, source, imm12); +} + +void Assembler::addiw(Register rd, Register rs1, int16_t imm12) { + GenInstrI(0b000, OP_IMM_32, rd, rs1, imm12); +} + +void Assembler::slliw(Register rd, Register rs1, uint8_t shamt) { + GenInstrShiftW_ri(0, 0b001, rd, rs1, shamt & 0x1f); +} + +void Assembler::srliw(Register rd, Register rs1, uint8_t shamt) { + GenInstrShiftW_ri(0, 0b101, rd, rs1, shamt & 0x1f); +} + +void Assembler::sraiw(Register rd, Register rs1, uint8_t shamt) { + GenInstrShiftW_ri(1, 0b101, rd, rs1, shamt & 0x1f); +} + +void Assembler::addw(Register rd, Register rs1, Register rs2) { + GenInstrALUW_rr(0b0000000, 0b000, rd, rs1, rs2); +} + +void Assembler::subw(Register rd, Register rs1, Register rs2) { + GenInstrALUW_rr(0b0100000, 0b000, rd, rs1, rs2); +} + +void Assembler::sllw(Register rd, Register rs1, Register rs2) { + GenInstrALUW_rr(0b0000000, 0b001, rd, rs1, rs2); +} + +void Assembler::srlw(Register rd, Register rs1, Register rs2) { + GenInstrALUW_rr(0b0000000, 0b101, rd, rs1, rs2); +} + +void Assembler::sraw(Register rd, Register rs1, Register rs2) { + GenInstrALUW_rr(0b0100000, 0b101, rd, rs1, rs2); +} + +// RV32M Standard Extension + +void Assembler::mul(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000001, 0b000, rd, rs1, rs2); +} + +void Assembler::mulh(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000001, 0b001, rd, rs1, rs2); +} + +void Assembler::mulhsu(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000001, 0b010, rd, rs1, rs2); +} + +void Assembler::mulhu(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000001, 0b011, rd, rs1, rs2); +} + +void Assembler::div(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000001, 0b100, rd, rs1, rs2); +} + +void Assembler::divu(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000001, 0b101, rd, rs1, rs2); +} + +void Assembler::rem(Register rd, Register rs1, Register rs2) { + GenInstrALU_rr(0b0000001, 0b110, rd, rs1, rs2); +} + +void Assembler::remu(Register rd, Register rs1, 
Register rs2) { + GenInstrALU_rr(0b0000001, 0b111, rd, rs1, rs2); +} + +// RV64M Standard Extension (in addition to RV32M) + +void Assembler::mulw(Register rd, Register rs1, Register rs2) { + GenInstrALUW_rr(0b0000001, 0b000, rd, rs1, rs2); +} + +void Assembler::divw(Register rd, Register rs1, Register rs2) { + GenInstrALUW_rr(0b0000001, 0b100, rd, rs1, rs2); +} + +void Assembler::divuw(Register rd, Register rs1, Register rs2) { + GenInstrALUW_rr(0b0000001, 0b101, rd, rs1, rs2); +} + +void Assembler::remw(Register rd, Register rs1, Register rs2) { + GenInstrALUW_rr(0b0000001, 0b110, rd, rs1, rs2); +} + +void Assembler::remuw(Register rd, Register rs1, Register rs2) { + GenInstrALUW_rr(0b0000001, 0b111, rd, rs1, rs2); +} + +// RV32A Standard Extension + +void Assembler::lr_w(bool aq, bool rl, Register rd, Register rs1) { + GenInstrRAtomic(0b00010, aq, rl, 0b010, rd, rs1, zero_reg); +} + +void Assembler::sc_w(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b00011, aq, rl, 0b010, rd, rs1, rs2); +} + +void Assembler::amoswap_w(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b00001, aq, rl, 0b010, rd, rs1, rs2); +} + +void Assembler::amoadd_w(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b00000, aq, rl, 0b010, rd, rs1, rs2); +} + +void Assembler::amoxor_w(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b00100, aq, rl, 0b010, rd, rs1, rs2); +} + +void Assembler::amoand_w(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b01100, aq, rl, 0b010, rd, rs1, rs2); +} + +void Assembler::amoor_w(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b01000, aq, rl, 0b010, rd, rs1, rs2); +} + +void Assembler::amomin_w(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b10000, aq, rl, 0b010, rd, rs1, rs2); +} + +void Assembler::amomax_w(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b10100, aq, rl, 0b010, rd, rs1, rs2); +} + +void Assembler::amominu_w(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b11000, aq, rl, 0b010, rd, rs1, rs2); +} + +void Assembler::amomaxu_w(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b11100, aq, rl, 0b010, rd, rs1, rs2); +} + +// RV64A Standard Extension (in addition to RV32A) + +void Assembler::lr_d(bool aq, bool rl, Register rd, Register rs1) { + GenInstrRAtomic(0b00010, aq, rl, 0b011, rd, rs1, zero_reg); +} + +void Assembler::sc_d(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b00011, aq, rl, 0b011, rd, rs1, rs2); +} + +void Assembler::amoswap_d(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b00001, aq, rl, 0b011, rd, rs1, rs2); +} + +void Assembler::amoadd_d(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b00000, aq, rl, 0b011, rd, rs1, rs2); +} + +void Assembler::amoxor_d(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b00100, aq, rl, 0b011, rd, rs1, rs2); +} + +void Assembler::amoand_d(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b01100, aq, rl, 0b011, rd, rs1, rs2); +} + +void Assembler::amoor_d(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b01000, aq, rl, 0b011, rd, rs1, rs2); +} + +void Assembler::amomin_d(bool aq, bool rl, Register 
rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b10000, aq, rl, 0b011, rd, rs1, rs2); +} + +void Assembler::amomax_d(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b10100, aq, rl, 0b011, rd, rs1, rs2); +} + +void Assembler::amominu_d(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b11000, aq, rl, 0b011, rd, rs1, rs2); +} + +void Assembler::amomaxu_d(bool aq, bool rl, Register rd, Register rs1, + Register rs2) { + GenInstrRAtomic(0b11100, aq, rl, 0b011, rd, rs1, rs2); +} + +// RV32F Standard Extension + +void Assembler::flw(FPURegister rd, Register rs1, int16_t imm12) { + GenInstrLoadFP_ri(0b010, rd, rs1, imm12); +} + +void Assembler::fsw(FPURegister source, Register base, int16_t imm12) { + GenInstrStoreFP_rri(0b010, base, source, imm12); +} + +void Assembler::fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm) { + GenInstrR4(0b00, MADD, rd, rs1, rs2, rs3, frm); +} + +void Assembler::fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm) { + GenInstrR4(0b00, MSUB, rd, rs1, rs2, rs3, frm); +} + +void Assembler::fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm) { + GenInstrR4(0b00, NMSUB, rd, rs1, rs2, rs3, frm); +} + +void Assembler::fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm) { + GenInstrR4(0b00, NMADD, rd, rs1, rs2, rs3, frm); +} + +void Assembler::fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm) { + GenInstrALUFP_rr(0b0000000, frm, rd, rs1, rs2); +} + +void Assembler::fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm) { + GenInstrALUFP_rr(0b0000100, frm, rd, rs1, rs2); +} + +void Assembler::fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm) { + GenInstrALUFP_rr(0b0001000, frm, rd, rs1, rs2); +} + +void Assembler::fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm) { + GenInstrALUFP_rr(0b0001100, frm, rd, rs1, rs2); +} + +void Assembler::fsqrt_s(FPURegister rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b0101100, frm, rd, rs1, zero_reg); +} + +void Assembler::fsgnj_s(FPURegister rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b0010000, 0b000, rd, rs1, rs2); +} + +void Assembler::fsgnjn_s(FPURegister rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b0010000, 0b001, rd, rs1, rs2); +} + +void Assembler::fsgnjx_s(FPURegister rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b0010000, 0b010, rd, rs1, rs2); +} + +void Assembler::fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b0010100, 0b000, rd, rs1, rs2); +} + +void Assembler::fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b0010100, 0b001, rd, rs1, rs2); +} + +void Assembler::fcvt_w_s(Register rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1100000, frm, rd, rs1, zero_reg); +} + +void Assembler::fcvt_wu_s(Register rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(1)); +} + +void Assembler::fmv_x_w(Register rd, FPURegister rs1) { + GenInstrALUFP_rr(0b1110000, 0b000, rd, rs1, zero_reg); +} + +void Assembler::feq_s(Register rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b1010000, 0b010, rd, rs1, rs2); +} + +void Assembler::flt_s(Register rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b1010000, 
0b001, rd, rs1, rs2); +} + +void Assembler::fle_s(Register rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b1010000, 0b000, rd, rs1, rs2); +} + +void Assembler::fclass_s(Register rd, FPURegister rs1) { + GenInstrALUFP_rr(0b1110000, 0b001, rd, rs1, zero_reg); +} + +void Assembler::fcvt_s_w(FPURegister rd, Register rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1101000, frm, rd, rs1, zero_reg); +} + +void Assembler::fcvt_s_wu(FPURegister rd, Register rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(1)); +} + +void Assembler::fmv_w_x(FPURegister rd, Register rs1) { + GenInstrALUFP_rr(0b1111000, 0b000, rd, rs1, zero_reg); +} + +// RV64F Standard Extension (in addition to RV32F) + +void Assembler::fcvt_l_s(Register rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(2)); +} + +void Assembler::fcvt_lu_s(Register rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(3)); +} + +void Assembler::fcvt_s_l(FPURegister rd, Register rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(2)); +} + +void Assembler::fcvt_s_lu(FPURegister rd, Register rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(3)); +} + +// RV32D Standard Extension + +void Assembler::fld(FPURegister rd, Register rs1, int16_t imm12) { + GenInstrLoadFP_ri(0b011, rd, rs1, imm12); +} + +void Assembler::fsd(FPURegister source, Register base, int16_t imm12) { + GenInstrStoreFP_rri(0b011, base, source, imm12); +} + +void Assembler::fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm) { + GenInstrR4(0b01, MADD, rd, rs1, rs2, rs3, frm); +} + +void Assembler::fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm) { + GenInstrR4(0b01, MSUB, rd, rs1, rs2, rs3, frm); +} + +void Assembler::fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm) { + GenInstrR4(0b01, NMSUB, rd, rs1, rs2, rs3, frm); +} + +void Assembler::fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm) { + GenInstrR4(0b01, NMADD, rd, rs1, rs2, rs3, frm); +} + +void Assembler::fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm) { + GenInstrALUFP_rr(0b0000001, frm, rd, rs1, rs2); +} + +void Assembler::fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm) { + GenInstrALUFP_rr(0b0000101, frm, rd, rs1, rs2); +} + +void Assembler::fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm) { + GenInstrALUFP_rr(0b0001001, frm, rd, rs1, rs2); +} + +void Assembler::fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm) { + GenInstrALUFP_rr(0b0001101, frm, rd, rs1, rs2); +} + +void Assembler::fsqrt_d(FPURegister rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b0101101, frm, rd, rs1, zero_reg); +} + +void Assembler::fsgnj_d(FPURegister rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b0010001, 0b000, rd, rs1, rs2); +} + +void Assembler::fsgnjn_d(FPURegister rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b0010001, 0b001, rd, rs1, rs2); +} + +void Assembler::fsgnjx_d(FPURegister rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b0010001, 0b010, rd, rs1, rs2); +} + +void Assembler::fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b0010101, 0b000, rd, rs1, rs2); +} + +void 
Assembler::fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b0010101, 0b001, rd, rs1, rs2); +} + +void Assembler::fcvt_s_d(FPURegister rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b0100000, frm, rd, rs1, ToRegister(1)); +} + +void Assembler::fcvt_d_s(FPURegister rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b0100001, frm, rd, rs1, zero_reg); +} + +void Assembler::feq_d(Register rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b1010001, 0b010, rd, rs1, rs2); +} + +void Assembler::flt_d(Register rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b1010001, 0b001, rd, rs1, rs2); +} + +void Assembler::fle_d(Register rd, FPURegister rs1, FPURegister rs2) { + GenInstrALUFP_rr(0b1010001, 0b000, rd, rs1, rs2); +} + +void Assembler::fclass_d(Register rd, FPURegister rs1) { + GenInstrALUFP_rr(0b1110001, 0b001, rd, rs1, zero_reg); +} + +void Assembler::fcvt_w_d(Register rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1100001, frm, rd, rs1, zero_reg); +} + +void Assembler::fcvt_wu_d(Register rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(1)); +} + +void Assembler::fcvt_d_w(FPURegister rd, Register rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1101001, frm, rd, rs1, zero_reg); +} + +void Assembler::fcvt_d_wu(FPURegister rd, Register rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(1)); +} + +// RV64D Standard Extension (in addition to RV32D) + +void Assembler::fcvt_l_d(Register rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(2)); +} + +void Assembler::fcvt_lu_d(Register rd, FPURegister rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(3)); +} + +void Assembler::fmv_x_d(Register rd, FPURegister rs1) { + GenInstrALUFP_rr(0b1110001, 0b000, rd, rs1, zero_reg); +} + +void Assembler::fcvt_d_l(FPURegister rd, Register rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(2)); +} + +void Assembler::fcvt_d_lu(FPURegister rd, Register rs1, RoundingMode frm) { + GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(3)); +} + +void Assembler::fmv_d_x(FPURegister rd, Register rs1) { + GenInstrALUFP_rr(0b1111001, 0b000, rd, rs1, zero_reg); +} + +// RV64C Standard Extension +void Assembler::c_nop() { GenInstrCI(0b000, C1, zero_reg, 0); } + +void Assembler::c_addi(Register rd, int8_t imm6) { + DCHECK(rd != zero_reg && imm6 != 0); + GenInstrCI(0b000, C1, rd, imm6); +} + +void Assembler::c_addiw(Register rd, int8_t imm6) { + DCHECK(rd != zero_reg); + GenInstrCI(0b001, C1, rd, imm6); +} + +void Assembler::c_addi16sp(int16_t imm10) { + DCHECK(is_int10(imm10) && (imm10 & 0xf) == 0); + uint8_t uimm6 = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) | + ((imm10 & 0x40) >> 3) | ((imm10 & 0x180) >> 6) | + ((imm10 & 0x20) >> 5); + GenInstrCIU(0b011, C1, sp, uimm6); +} + +void Assembler::c_addi4spn(Register rd, int16_t uimm10) { + DCHECK(is_uint10(uimm10) && (uimm10 != 0)); + uint8_t uimm8 = ((uimm10 & 0x4) >> 1) | ((uimm10 & 0x8) >> 3) | + ((uimm10 & 0x30) << 2) | ((uimm10 & 0x3c0) >> 4); + GenInstrCIW(0b000, C0, rd, uimm8); +} + +void Assembler::c_li(Register rd, int8_t imm6) { + DCHECK(rd != zero_reg); + GenInstrCI(0b010, C1, rd, imm6); +} + +void Assembler::c_lui(Register rd, int8_t imm6) { + DCHECK(rd != zero_reg && rd != sp && imm6 != 0); + GenInstrCI(0b011, C1, rd, imm6); +} + +void Assembler::c_slli(Register rd, uint8_t shamt6) { + DCHECK(rd != zero_reg && 
shamt6 != 0); + GenInstrCIU(0b000, C2, rd, shamt6); +} + +void Assembler::c_fldsp(FPURegister rd, uint16_t uimm9) { + DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0); + uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6); + GenInstrCIU(0b001, C2, rd, uimm6); +} + +void Assembler::c_lwsp(Register rd, uint16_t uimm8) { + DCHECK(rd != zero_reg && is_uint8(uimm8) && (uimm8 & 0x3) == 0); + uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6); + GenInstrCIU(0b010, C2, rd, uimm6); +} + +void Assembler::c_ldsp(Register rd, uint16_t uimm9) { + DCHECK(rd != zero_reg && is_uint9(uimm9) && (uimm9 & 0x7) == 0); + uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6); + GenInstrCIU(0b011, C2, rd, uimm6); +} + +void Assembler::c_jr(Register rs1) { + DCHECK(rs1 != zero_reg); + GenInstrCR(0b1000, C2, rs1, zero_reg); + BlockTrampolinePoolFor(1); +} + +void Assembler::c_mv(Register rd, Register rs2) { + DCHECK(rd != zero_reg && rs2 != zero_reg); + GenInstrCR(0b1000, C2, rd, rs2); +} + +void Assembler::c_ebreak() { GenInstrCR(0b1001, C2, zero_reg, zero_reg); } + +void Assembler::c_jalr(Register rs1) { + DCHECK(rs1 != zero_reg); + GenInstrCR(0b1001, C2, rs1, zero_reg); + BlockTrampolinePoolFor(1); +} + +void Assembler::c_add(Register rd, Register rs2) { + DCHECK(rd != zero_reg && rs2 != zero_reg); + GenInstrCR(0b1001, C2, rd, rs2); +} + +// CA Instructions +void Assembler::c_sub(Register rd, Register rs2) { + DCHECK(((rd.code() & 0b11000) == 0b01000) && + ((rs2.code() & 0b11000) == 0b01000)); + GenInstrCA(0b100011, C1, rd, 0b00, rs2); +} + +void Assembler::c_xor(Register rd, Register rs2) { + DCHECK(((rd.code() & 0b11000) == 0b01000) && + ((rs2.code() & 0b11000) == 0b01000)); + GenInstrCA(0b100011, C1, rd, 0b01, rs2); +} + +void Assembler::c_or(Register rd, Register rs2) { + DCHECK(((rd.code() & 0b11000) == 0b01000) && + ((rs2.code() & 0b11000) == 0b01000)); + GenInstrCA(0b100011, C1, rd, 0b10, rs2); +} + +void Assembler::c_and(Register rd, Register rs2) { + DCHECK(((rd.code() & 0b11000) == 0b01000) && + ((rs2.code() & 0b11000) == 0b01000)); + GenInstrCA(0b100011, C1, rd, 0b11, rs2); +} + +void Assembler::c_subw(Register rd, Register rs2) { + DCHECK(((rd.code() & 0b11000) == 0b01000) && + ((rs2.code() & 0b11000) == 0b01000)); + GenInstrCA(0b100111, C1, rd, 0b00, rs2); +} + +void Assembler::c_addw(Register rd, Register rs2) { + DCHECK(((rd.code() & 0b11000) == 0b01000) && + ((rs2.code() & 0b11000) == 0b01000)); + GenInstrCA(0b100111, C1, rd, 0b01, rs2); +} + +void Assembler::c_swsp(Register rs2, uint16_t uimm8) { + DCHECK(is_uint8(uimm8) && (uimm8 & 0x3) == 0); + uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6); + GenInstrCSS(0b110, C2, rs2, uimm6); +} + +void Assembler::c_sdsp(Register rs2, uint16_t uimm9) { + DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0); + uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6); + GenInstrCSS(0b111, C2, rs2, uimm6); +} + +void Assembler::c_fsdsp(FPURegister rs2, uint16_t uimm9) { + DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0); + uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6); + GenInstrCSS(0b101, C2, rs2, uimm6); +} + +// CL Instructions + +void Assembler::c_lw(Register rd, Register rs1, uint16_t uimm7) { + DCHECK(((rd.code() & 0b11000) == 0b01000) && + ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) && + ((uimm7 & 0x3) == 0)); + uint8_t uimm5 = + ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1); + GenInstrCL(0b010, C0, rd, rs1, uimm5); +} + +void Assembler::c_ld(Register rd, Register rs1, uint16_t uimm8) { + 
DCHECK(((rd.code() & 0b11000) == 0b01000) && + ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) && + ((uimm8 & 0x7) == 0)); + uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6); + GenInstrCL(0b011, C0, rd, rs1, uimm5); +} + +void Assembler::c_fld(FPURegister rd, Register rs1, uint16_t uimm8) { + DCHECK(((rd.code() & 0b11000) == 0b01000) && + ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) && + ((uimm8 & 0x7) == 0)); + uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6); + GenInstrCL(0b001, C0, rd, rs1, uimm5); +} + +// CS Instructions + +void Assembler::c_sw(Register rs2, Register rs1, uint16_t uimm7) { + DCHECK(((rs2.code() & 0b11000) == 0b01000) && + ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) && + ((uimm7 & 0x3) == 0)); + uint8_t uimm5 = + ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1); + GenInstrCS(0b110, C0, rs2, rs1, uimm5); +} + +void Assembler::c_sd(Register rs2, Register rs1, uint16_t uimm8) { + DCHECK(((rs2.code() & 0b11000) == 0b01000) && + ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) && + ((uimm8 & 0x7) == 0)); + uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6); + GenInstrCS(0b111, C0, rs2, rs1, uimm5); +} + +void Assembler::c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8) { + DCHECK(((rs2.code() & 0b11000) == 0b01000) && + ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) && + ((uimm8 & 0x7) == 0)); + uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6); + GenInstrCS(0b101, C0, rs2, rs1, uimm5); +} + +// CJ Instructions + +void Assembler::c_j(int16_t imm12) { + DCHECK(is_int12(imm12)); + int16_t uimm11 = ((imm12 & 0x800) >> 1) | ((imm12 & 0x400) >> 4) | + ((imm12 & 0x300) >> 1) | ((imm12 & 0x80) >> 3) | + ((imm12 & 0x40) >> 1) | ((imm12 & 0x20) >> 5) | + ((imm12 & 0x10) << 5) | (imm12 & 0xe); + GenInstrCJ(0b101, C1, uimm11); + BlockTrampolinePoolFor(1); +} + +// CB Instructions + +void Assembler::c_bnez(Register rs1, int16_t imm9) { + DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9)); + uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) | + ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1); + GenInstrCB(0b111, C1, rs1, uimm8); +} + +void Assembler::c_beqz(Register rs1, int16_t imm9) { + DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9)); + uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) | + ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1); + GenInstrCB(0b110, C1, rs1, uimm8); +} + +void Assembler::c_srli(Register rs1, int8_t shamt6) { + DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6)); + GenInstrCBA(0b100, 0b00, C1, rs1, shamt6); +} + +void Assembler::c_srai(Register rs1, int8_t shamt6) { + DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6)); + GenInstrCBA(0b100, 0b01, C1, rs1, shamt6); +} + +void Assembler::c_andi(Register rs1, int8_t imm6) { + DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(imm6)); + GenInstrCBA(0b100, 0b10, C1, rs1, imm6); +} + +// Definitions for using compressed vs non compressed + +void Assembler::NOP() { + if (FLAG_riscv_c_extension) + c_nop(); + else + nop(); +} + +void Assembler::EBREAK() { + if (FLAG_riscv_c_extension) + c_ebreak(); + else + ebreak(); +} + +// RVV + +void Assembler::vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1, + MaskType mask) { + GenInstrV(VREDMAXU_FUNCT6, OP_MVV, vd, vs1, vs2, mask); +} + +void Assembler::vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1, + MaskType mask) { + GenInstrV(VREDMAX_FUNCT6, OP_MVV, vd, 
vs1, vs2, mask); +} + +void Assembler::vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1, + MaskType mask) { + GenInstrV(VREDMIN_FUNCT6, OP_MVV, vd, vs1, vs2, mask); +} + +void Assembler::vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1, + MaskType mask) { + GenInstrV(VREDMINU_FUNCT6, OP_MVV, vd, vs1, vs2, mask); +} + +void Assembler::vmv_vv(VRegister vd, VRegister vs1) { + GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask); +} + +void Assembler::vmv_vx(VRegister vd, Register rs1) { + GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, v0, NoMask); +} + +void Assembler::vmv_vi(VRegister vd, uint8_t simm5) { + GenInstrV(VMV_FUNCT6, vd, simm5, v0, NoMask); +} + +void Assembler::vmv_xs(Register rd, VRegister vs2) { + GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b00000, vs2, NoMask); +} + +void Assembler::vmv_sx(VRegister vd, Register rs1) { + GenInstrV(VRXUNARY0_FUNCT6, OP_MVX, vd, rs1, v0, NoMask); +} + +void Assembler::vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2) { + GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, vs2, Mask); +} + +void Assembler::vmerge_vx(VRegister vd, Register rs1, VRegister vs2) { + GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, vs2, Mask); +} + +void Assembler::vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2) { + GenInstrV(VMV_FUNCT6, vd, imm5, vs2, Mask); +} + +void Assembler::vadc_vv(VRegister vd, VRegister vs1, VRegister vs2) { + GenInstrV(VADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask); +} + +void Assembler::vadc_vx(VRegister vd, Register rs1, VRegister vs2) { + GenInstrV(VADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask); +} + +void Assembler::vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) { + GenInstrV(VADC_FUNCT6, vd, imm5, vs2, Mask); +} + +void Assembler::vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2) { + GenInstrV(VMADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask); +} + +void Assembler::vmadc_vx(VRegister vd, Register rs1, VRegister vs2) { + GenInstrV(VMADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask); +} + +void Assembler::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) { + GenInstrV(VMADC_FUNCT6, vd, imm5, vs2, Mask); +} + +void Assembler::vrgather_vv(VRegister vd, VRegister vs2, VRegister vs1, + MaskType mask) { + DCHECK_NE(vd, vs1); + DCHECK_NE(vd, vs2); + GenInstrV(VRGATHER_FUNCT6, OP_IVV, vd, vs1, vs2, mask); +} + +void Assembler::vrgather_vi(VRegister vd, VRegister vs2, int8_t imm5, + MaskType mask) { + DCHECK_NE(vd, vs2); + GenInstrV(VRGATHER_FUNCT6, vd, imm5, vs2, mask); +} + +void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1, + MaskType mask) { + DCHECK_NE(vd, vs2); + GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask); +} + +void Assembler::vwaddu_wx(VRegister vd, VRegister vs2, Register rs1, + MaskType mask) { + GenInstrV(VWADDUW_FUNCT6, OP_MVX, vd, rs1, vs2, mask); +} + +void Assembler::vid_v(VRegister vd, MaskType mask) { + GenInstrV(VMUNARY0_FUNCT6, OP_MVV, vd, VID_V, v0, mask); +} + +#define DEFINE_OPIVV(name, funct6) \ + void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \ + MaskType mask) { \ + GenInstrV(funct6, OP_IVV, vd, vs1, vs2, mask); \ + } + +#define DEFINE_OPFVV(name, funct6) \ + void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \ + MaskType mask) { \ + GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \ + } + +#define DEFINE_OPFWV(name, funct6) \ + void Assembler::name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \ + MaskType mask) { \ + GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \ + } + +#define DEFINE_OPFRED(name, funct6) \ + void Assembler::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \ 
+ MaskType mask) { \ + GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \ + } + +#define DEFINE_OPIVX(name, funct6) \ + void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \ + MaskType mask) { \ + GenInstrV(funct6, OP_IVX, vd, rs1, vs2, mask); \ + } + +#define DEFINE_OPIVI(name, funct6) \ + void Assembler::name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \ + MaskType mask) { \ + GenInstrV(funct6, vd, imm5, vs2, mask); \ + } + +#define DEFINE_OPMVV(name, funct6) \ + void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \ + MaskType mask) { \ + GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \ + } + +// void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, Register rs1, +// VRegister vs2, MaskType mask = NoMask); +#define DEFINE_OPMVX(name, funct6) \ + void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \ + MaskType mask) { \ + GenInstrV(funct6, OP_MVX, vd, rs1, vs2, mask); \ + } + +#define DEFINE_OPFVF(name, funct6) \ + void Assembler::name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \ + MaskType mask) { \ + GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \ + } + +#define DEFINE_OPFWF(name, funct6) \ + void Assembler::name##_wf(VRegister vd, VRegister vs2, FPURegister fs1, \ + MaskType mask) { \ + GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \ + } + +#define DEFINE_OPFVV_FMA(name, funct6) \ + void Assembler::name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \ + MaskType mask) { \ + GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \ + } + +#define DEFINE_OPFVF_FMA(name, funct6) \ + void Assembler::name##_vf(VRegister vd, FPURegister fs1, VRegister vs2, \ + MaskType mask) { \ + GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \ + } + +// vector integer extension +#define DEFINE_OPMVV_VIE(name, vs1) \ + void Assembler::name(VRegister vd, VRegister vs2, MaskType mask) { \ + GenInstrV(VXUNARY0_FUNCT6, OP_MVV, vd, vs1, vs2, mask); \ + } + +void Assembler::vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask) { + GenInstrV(VMV_FUNCT6, OP_FVF, vd, fs1, v0, mask); +} + +void Assembler::vfmv_fs(FPURegister fd, VRegister vs2) { + GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, NoMask); +} + +void Assembler::vfmv_sf(VRegister vd, FPURegister fs) { + GenInstrV(VRFUNARY0_FUNCT6, OP_FVF, vd, fs, v0, NoMask); +} + +DEFINE_OPIVV(vadd, VADD_FUNCT6) +DEFINE_OPIVX(vadd, VADD_FUNCT6) +DEFINE_OPIVI(vadd, VADD_FUNCT6) +DEFINE_OPIVV(vsub, VSUB_FUNCT6) +DEFINE_OPIVX(vsub, VSUB_FUNCT6) +DEFINE_OPMVX(vdiv, VDIV_FUNCT6) +DEFINE_OPMVX(vdivu, VDIVU_FUNCT6) +DEFINE_OPMVX(vmul, VMUL_FUNCT6) +DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6) +DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6) +DEFINE_OPMVX(vmulh, VMULH_FUNCT6) +DEFINE_OPMVV(vdiv, VDIV_FUNCT6) +DEFINE_OPMVV(vdivu, VDIVU_FUNCT6) +DEFINE_OPMVV(vmul, VMUL_FUNCT6) +DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6) +DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6) +DEFINE_OPMVV(vwmul, VWMUL_FUNCT6) +DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6) +DEFINE_OPMVV(vmulh, VMULH_FUNCT6) +DEFINE_OPMVV(vwadd, VWADD_FUNCT6) +DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6) +DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6) +DEFINE_OPIVX(vsadd, VSADD_FUNCT6) +DEFINE_OPIVV(vsadd, VSADD_FUNCT6) +DEFINE_OPIVI(vsadd, VSADD_FUNCT6) +DEFINE_OPIVX(vsaddu, VSADDU_FUNCT6) +DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6) +DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6) +DEFINE_OPIVX(vssub, VSSUB_FUNCT6) +DEFINE_OPIVV(vssub, VSSUB_FUNCT6) +DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6) +DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6) +DEFINE_OPIVX(vrsub, VRSUB_FUNCT6) +DEFINE_OPIVI(vrsub, VRSUB_FUNCT6) +DEFINE_OPIVV(vminu, VMINU_FUNCT6) 
+DEFINE_OPIVX(vminu, VMINU_FUNCT6) +DEFINE_OPIVV(vmin, VMIN_FUNCT6) +DEFINE_OPIVX(vmin, VMIN_FUNCT6) +DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6) +DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6) +DEFINE_OPIVV(vmax, VMAX_FUNCT6) +DEFINE_OPIVX(vmax, VMAX_FUNCT6) +DEFINE_OPIVV(vand, VAND_FUNCT6) +DEFINE_OPIVX(vand, VAND_FUNCT6) +DEFINE_OPIVI(vand, VAND_FUNCT6) +DEFINE_OPIVV(vor, VOR_FUNCT6) +DEFINE_OPIVX(vor, VOR_FUNCT6) +DEFINE_OPIVI(vor, VOR_FUNCT6) +DEFINE_OPIVV(vxor, VXOR_FUNCT6) +DEFINE_OPIVX(vxor, VXOR_FUNCT6) +DEFINE_OPIVI(vxor, VXOR_FUNCT6) + +DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6) +DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6) +DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6) +DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6) + +DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6) +DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6) +DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6) + +DEFINE_OPIVV(vmsne, VMSNE_FUNCT6) +DEFINE_OPIVX(vmsne, VMSNE_FUNCT6) +DEFINE_OPIVI(vmsne, VMSNE_FUNCT6) + +DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6) +DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6) + +DEFINE_OPIVV(vmslt, VMSLT_FUNCT6) +DEFINE_OPIVX(vmslt, VMSLT_FUNCT6) + +DEFINE_OPIVV(vmsle, VMSLE_FUNCT6) +DEFINE_OPIVX(vmsle, VMSLE_FUNCT6) +DEFINE_OPIVI(vmsle, VMSLE_FUNCT6) + +DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6) +DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6) +DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6) + +DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6) +DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6) + +DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6) +DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6) + +DEFINE_OPIVV(vsrl, VSRL_FUNCT6) +DEFINE_OPIVX(vsrl, VSRL_FUNCT6) +DEFINE_OPIVI(vsrl, VSRL_FUNCT6) + +DEFINE_OPIVV(vsra, VSRA_FUNCT6) +DEFINE_OPIVX(vsra, VSRA_FUNCT6) +DEFINE_OPIVI(vsra, VSRA_FUNCT6) + +DEFINE_OPIVV(vsll, VSLL_FUNCT6) +DEFINE_OPIVX(vsll, VSLL_FUNCT6) +DEFINE_OPIVI(vsll, VSLL_FUNCT6) + +DEFINE_OPIVV(vsmul, VSMUL_FUNCT6) +DEFINE_OPIVX(vsmul, VSMUL_FUNCT6) + +DEFINE_OPFVV(vfadd, VFADD_FUNCT6) +DEFINE_OPFVF(vfadd, VFADD_FUNCT6) +DEFINE_OPFVV(vfsub, VFSUB_FUNCT6) +DEFINE_OPFVF(vfsub, VFSUB_FUNCT6) +DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6) +DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6) +DEFINE_OPFVV(vfmul, VFMUL_FUNCT6) +DEFINE_OPFVF(vfmul, VFMUL_FUNCT6) +DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6) +DEFINE_OPFVV(vmfne, VMFNE_FUNCT6) +DEFINE_OPFVV(vmflt, VMFLT_FUNCT6) +DEFINE_OPFVV(vmfle, VMFLE_FUNCT6) +DEFINE_OPFVV(vfmax, VFMAX_FUNCT6) +DEFINE_OPFVV(vfmin, VFMIN_FUNCT6) + +// Vector Widening Floating-Point Add/Subtract Instructions +DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6) +DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6) +DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6) +DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6) +DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6) +DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6) +DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6) +DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6) + +// Vector Widening Floating-Point Reduction Instructions +DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6) +DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6) + +// Vector Widening Floating-Point Multiply +DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6) +DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6) + +DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6) + +DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6) +DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6) +DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6) +DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6) +DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6) +DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6) + +// Vector Single-Width Floating-Point Fused Multiply-Add Instructions +DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6) +DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6) +DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6) +DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6) +DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6) +DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6) 
+DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6) +DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6) +DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6) +DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6) +DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6) +DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6) +DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6) +DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6) +DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6) +DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6) + +// Vector Widening Floating-Point Fused Multiply-Add Instructions +DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6) +DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6) +DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6) +DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6) +DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6) +DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6) +DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6) +DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6) + +// Vector Narrowing Fixed-Point Clip Instructions +DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6) +DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6) +DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6) +DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6) +DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6) +DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6) + +// Vector Integer Extension +DEFINE_OPMVV_VIE(vzext_vf8, 0b00010) +DEFINE_OPMVV_VIE(vsext_vf8, 0b00011) +DEFINE_OPMVV_VIE(vzext_vf4, 0b00100) +DEFINE_OPMVV_VIE(vsext_vf4, 0b00101) +DEFINE_OPMVV_VIE(vzext_vf2, 0b00110) +DEFINE_OPMVV_VIE(vsext_vf2, 0b00111) + +#undef DEFINE_OPIVI +#undef DEFINE_OPIVV +#undef DEFINE_OPIVX +#undef DEFINE_OPFVV +#undef DEFINE_OPFWV +#undef DEFINE_OPFVF +#undef DEFINE_OPFWF +#undef DEFINE_OPFVV_FMA +#undef DEFINE_OPFVF_FMA +#undef DEFINE_OPMVV_VIE + +void Assembler::vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul, + TailAgnosticType tail, MaskAgnosticType mask) { + int32_t zimm = GenZimm(vsew, vlmul, tail, mask); + Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) | + ((rs1.code() & 0x1F) << kRvvRs1Shift) | + (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x0 << 31; + emit(instr); +} + +void Assembler::vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul, + TailAgnosticType tail, MaskAgnosticType mask) { + DCHECK(is_uint5(uimm)); + int32_t zimm = GenZimm(vsew, vlmul, tail, mask) & 0x3FF; + Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) | + ((uimm & 0x1F) << kRvvUimmShift) | + (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x3 << 30; + emit(instr); +} + +void Assembler::vsetvl(Register rd, Register rs1, Register rs2) { + Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) | + ((rs1.code() & 0x1F) << kRvvRs1Shift) | + ((rs2.code() & 0x1F) << kRvvRs2Shift) | 0x40 << 25; + emit(instr); +} + +uint8_t vsew_switch(VSew vsew) { + uint8_t width; + switch (vsew) { + case E8: + width = 0b000; + break; + case E16: + width = 0b101; + break; + case E32: + width = 0b110; + break; + default: + width = 0b111; + break; + } + return width; +} + +void Assembler::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b000); +} +void Assembler::vls(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b000); +} +void Assembler::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0); +} + +void Assembler::vs(VRegister vd, Register rs1, 
uint8_t sumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b000); +} +void Assembler::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, 0, 0b000); +} + +void Assembler::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0b000); +} +void Assembler::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, 0, 0b000); +} + +void Assembler::vlseg2(VRegister vd, Register rs1, uint8_t lumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b001); +} + +void Assembler::vlseg3(VRegister vd, Register rs1, uint8_t lumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b010); +} + +void Assembler::vlseg4(VRegister vd, Register rs1, uint8_t lumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b011); +} + +void Assembler::vlseg5(VRegister vd, Register rs1, uint8_t lumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b100); +} + +void Assembler::vlseg6(VRegister vd, Register rs1, uint8_t lumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b101); +} + +void Assembler::vlseg7(VRegister vd, Register rs1, uint8_t lumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b110); +} + +void Assembler::vlseg8(VRegister vd, Register rs1, uint8_t lumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b111); +} +void Assembler::vsseg2(VRegister vd, Register rs1, uint8_t sumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b001); +} +void Assembler::vsseg3(VRegister vd, Register rs1, uint8_t sumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b010); +} +void Assembler::vsseg4(VRegister vd, Register rs1, uint8_t sumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b011); +} +void Assembler::vsseg5(VRegister vd, Register rs1, uint8_t sumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b100); +} +void Assembler::vsseg6(VRegister vd, Register rs1, uint8_t sumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b101); +} +void Assembler::vsseg7(VRegister vd, Register rs1, uint8_t sumop, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b110); +} +void Assembler::vsseg8(VRegister vd, Register rs1, uint8_t sumop, VSew vsew, + MaskType mask) { + uint8_t width = 
vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b111); +} + +void Assembler::vlsseg2(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001); +} +void Assembler::vlsseg3(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010); +} +void Assembler::vlsseg4(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011); +} +void Assembler::vlsseg5(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100); +} +void Assembler::vlsseg6(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101); +} +void Assembler::vlsseg7(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110); +} +void Assembler::vlsseg8(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111); +} +void Assembler::vssseg2(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001); +} +void Assembler::vssseg3(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010); +} +void Assembler::vssseg4(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011); +} +void Assembler::vssseg5(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100); +} +void Assembler::vssseg6(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101); +} +void Assembler::vssseg7(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110); +} +void Assembler::vssseg8(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111); +} + +void Assembler::vlxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001); +} +void Assembler::vlxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010); +} +void Assembler::vlxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011); +} +void 
Assembler::vlxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100); +} +void Assembler::vlxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101); +} +void Assembler::vlxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110); +} +void Assembler::vlxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111); +} +void Assembler::vsxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001); +} +void Assembler::vsxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010); +} +void Assembler::vsxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011); +} +void Assembler::vsxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100); +} +void Assembler::vsxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101); +} +void Assembler::vsxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110); +} +void Assembler::vsxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew, + MaskType mask) { + uint8_t width = vsew_switch(vsew); + GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111); +} + +void Assembler::vfirst_m(Register rd, VRegister vs2, MaskType mask) { + GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10001, vs2, mask); +} + +void Assembler::vcpop_m(Register rd, VRegister vs2, MaskType mask) { + GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10000, vs2, mask); +} + +// Privileged +void Assembler::uret() { + GenInstrPriv(0b0000000, ToRegister(0), ToRegister(0b00010)); +} + +void Assembler::sret() { + GenInstrPriv(0b0001000, ToRegister(0), ToRegister(0b00010)); +} + +void Assembler::mret() { + GenInstrPriv(0b0011000, ToRegister(0), ToRegister(0b00010)); +} + +void Assembler::wfi() { + GenInstrPriv(0b0001000, ToRegister(0), ToRegister(0b00101)); +} + +void Assembler::sfence_vma(Register rs1, Register rs2) { + GenInstrR(0b0001001, 0b000, SYSTEM, ToRegister(0), rs1, rs2); +} + +// Assembler Pseudo Instructions (Tables 25.2 and 25.3, RISC-V Unprivileged ISA) + +void Assembler::nop() { addi(ToRegister(0), ToRegister(0), 0); } + +void Assembler::RV_li(Register rd, int64_t imm) { + // 64-bit imm is put in the register rd. + // In most cases the imm is 32 bit and 2 instructions are generated. If a + // temporary register is available, in the worst case, 6 instructions are + // generated for a full 64-bit immediate. If temporay register is not + // available the maximum will be 8 instructions. 
If imm is more than 32 bits + // and a temp register is available, imm is divided into two 32-bit parts, + // low_32 and up_32. Each part is built in a separate register. low_32 is + // built before up_32. If low_32 is negative (upper 32 bits are 1), 0xffffffff + // is subtracted from up_32 before up_32 is built. This compensates for 32 + // bits of 1's in the lower when the two registers are added. If no temp is + // available, the upper 32 bit is built in rd, and the lower 32 bits are + // devided to 3 parts (11, 11, and 10 bits). The parts are shifted and added + // to the upper part built in rd. + if (is_int32(imm + 0x800)) { + // 32-bit case. Maximum of 2 instructions generated + int64_t high_20 = ((imm + 0x800) >> 12); + int64_t low_12 = imm << 52 >> 52; + if (high_20) { + lui(rd, (int32_t)high_20); + if (low_12) { + addi(rd, rd, low_12); + } + } else { + addi(rd, zero_reg, low_12); + } + return; + } else { + // 64-bit case: divide imm into two 32-bit parts, upper and lower + int64_t up_32 = imm >> 32; + int64_t low_32 = imm & 0xffffffffull; + Register temp_reg = rd; + // Check if a temporary register is available + if (up_32 == 0 || low_32 == 0) { + // No temp register is needed + } else { + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + temp_reg = temps.hasAvailable() ? temps.Acquire() : no_reg; + } + if (temp_reg != no_reg) { + // keep track of hardware behavior for lower part in sim_low + int64_t sim_low = 0; + // Build lower part + if (low_32 != 0) { + int64_t high_20 = ((low_32 + 0x800) >> 12); + int64_t low_12 = low_32 & 0xfff; + if (high_20) { + // Adjust to 20 bits for the case of overflow + high_20 &= 0xfffff; + sim_low = ((high_20 << 12) << 32) >> 32; + lui(rd, (int32_t)high_20); + if (low_12) { + sim_low += (low_12 << 52 >> 52) | low_12; + addi(rd, rd, low_12); + } + } else { + sim_low = low_12; + ori(rd, zero_reg, low_12); + } + } + if (sim_low & 0x100000000) { + // Bit 31 is 1. Either an overflow or a negative 64 bit + if (up_32 == 0) { + // Positive number, but overflow because of the add 0x800 + slli(rd, rd, 32); + srli(rd, rd, 32); + return; + } + // low_32 is a negative 64 bit after the build + up_32 = (up_32 - 0xffffffff) & 0xffffffff; + } + if (up_32 == 0) { + return; + } + // Build upper part in a temporary register + if (low_32 == 0) { + // Build upper part in rd + temp_reg = rd; + } + int64_t high_20 = (up_32 + 0x800) >> 12; + int64_t low_12 = up_32 & 0xfff; + if (high_20) { + // Adjust to 20 bits for the case of overflow + high_20 &= 0xfffff; + lui(temp_reg, (int32_t)high_20); + if (low_12) { + addi(temp_reg, temp_reg, low_12); + } + } else { + ori(temp_reg, zero_reg, low_12); + } + // Put it at the bgining of register + slli(temp_reg, temp_reg, 32); + if (low_32 != 0) { + add(rd, rd, temp_reg); + } + return; + } + // No temp register. Build imm in rd. + // Build upper 32 bits first in rd. Divide lower 32 bits parts and add + // parts to the upper part by doing shift and add. + // First build upper part in rd. + int64_t high_20 = (up_32 + 0x800) >> 12; + int64_t low_12 = up_32 & 0xfff; + if (high_20) { + // Adjust to 20 bits for the case of overflow + high_20 &= 0xfffff; + lui(rd, (int32_t)high_20); + if (low_12) { + addi(rd, rd, low_12); + } + } else { + ori(rd, zero_reg, low_12); + } + // upper part already in rd. Each part to be added to rd, has maximum of 11 + // bits, and always starts with a 1. rd is shifted by the size of the part + // plus the number of zeros between the parts. 
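[Editorial illustration, not part of the upstream patch.] The RV_li code above relies on the +0x800 rounding trick: a 32-bit value is split into a 20-bit upper part loaded by lui and a sign-extended 12-bit remainder added by addi, and the two recombine exactly. A minimal standalone sketch of that identity follows; the helper name SplitForLuiAddi is made up for illustration.

// Illustrative sketch only: checks the lui/addi split used by RV_li's
// 32-bit case, assuming only that addi sign-extends its 12-bit immediate.
#include <cassert>
#include <cstdint>

static void SplitForLuiAddi(int64_t imm, int64_t* high_20, int64_t* low_12) {
  *high_20 = (imm + 0x800) >> 12;           // what lui would materialize (<< 12)
  *low_12 = imm & 0xfff;                    // what addi would add,
  if (*low_12 >= 0x800) *low_12 -= 0x1000;  // sign-extended to a signed value
}

int main() {
  const int64_t tests[] = {0, 1, -1, 0x7ff, 0x800, -0x800, 0x12345678, -0x12345678};
  for (int64_t imm : tests) {
    int64_t hi, lo;
    SplitForLuiAddi(imm, &hi, &lo);
    assert((hi << 12) + lo == imm);  // lui + addi reproduces the immediate
  }
  return 0;
}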
Each part is added after the + // left shift. + uint32_t mask = 0x80000000; + int32_t shift_val = 0; + int32_t i; + for (i = 0; i < 32; i++) { + if ((low_32 & mask) == 0) { + mask >>= 1; + shift_val++; + if (i == 31) { + // rest is zero + slli(rd, rd, shift_val); + } + continue; + } + // The first 1 seen + int32_t part; + if ((i + 11) < 32) { + // Pick 11 bits + part = ((uint32_t)(low_32 << i) >> i) >> (32 - (i + 11)); + slli(rd, rd, shift_val + 11); + ori(rd, rd, part); + i += 10; + mask >>= 11; + } else { + part = (uint32_t)(low_32 << i) >> i; + slli(rd, rd, shift_val + (32 - i)); + ori(rd, rd, part); + break; + } + shift_val = 0; + } + } +} + +int Assembler::li_estimate(int64_t imm, bool is_get_temp_reg) { + int count = 0; + // imitate Assembler::RV_li + if (is_int32(imm + 0x800)) { + // 32-bit case. Maximum of 2 instructions generated + int64_t high_20 = ((imm + 0x800) >> 12); + int64_t low_12 = imm << 52 >> 52; + if (high_20) { + count++; + if (low_12) { + count++; + } + } else { + count++; + } + return count; + } else { + // 64-bit case: divide imm into two 32-bit parts, upper and lower + int64_t up_32 = imm >> 32; + int64_t low_32 = imm & 0xffffffffull; + // Check if a temporary register is available + if (is_get_temp_reg) { + // keep track of hardware behavior for lower part in sim_low + int64_t sim_low = 0; + // Build lower part + if (low_32 != 0) { + int64_t high_20 = ((low_32 + 0x800) >> 12); + int64_t low_12 = low_32 & 0xfff; + if (high_20) { + // Adjust to 20 bits for the case of overflow + high_20 &= 0xfffff; + sim_low = ((high_20 << 12) << 32) >> 32; + count++; + if (low_12) { + sim_low += (low_12 << 52 >> 52) | low_12; + count++; + } + } else { + sim_low = low_12; + count++; + } + } + if (sim_low & 0x100000000) { + // Bit 31 is 1. Either an overflow or a negative 64 bit + if (up_32 == 0) { + // Positive number, but overflow because of the add 0x800 + count++; + count++; + return count; + } + // low_32 is a negative 64 bit after the build + up_32 = (up_32 - 0xffffffff) & 0xffffffff; + } + if (up_32 == 0) { + return count; + } + int64_t high_20 = (up_32 + 0x800) >> 12; + int64_t low_12 = up_32 & 0xfff; + if (high_20) { + // Adjust to 20 bits for the case of overflow + high_20 &= 0xfffff; + count++; + if (low_12) { + count++; + } + } else { + count++; + } + // Put it at the bgining of register + count++; + if (low_32 != 0) { + count++; + } + return count; + } + // No temp register. Build imm in rd. + // Build upper 32 bits first in rd. Divide lower 32 bits parts and add + // parts to the upper part by doing shift and add. + // First build upper part in rd. + int64_t high_20 = (up_32 + 0x800) >> 12; + int64_t low_12 = up_32 & 0xfff; + if (high_20) { + // Adjust to 20 bits for the case of overflow + high_20 &= 0xfffff; + count++; + if (low_12) { + count++; + } + } else { + count++; + } + // upper part already in rd. Each part to be added to rd, has maximum of 11 + // bits, and always starts with a 1. rd is shifted by the size of the part + // plus the number of zeros between the parts. Each part is added after the + // left shift. 
+ uint32_t mask = 0x80000000; + int32_t i; + for (i = 0; i < 32; i++) { + if ((low_32 & mask) == 0) { + mask >>= 1; + if (i == 31) { + // rest is zero + count++; + } + continue; + } + // The first 1 seen + if ((i + 11) < 32) { + // Pick 11 bits + count++; + count++; + i += 10; + mask >>= 11; + } else { + count++; + count++; + break; + } + } + } + return count; +} + +void Assembler::li_ptr(Register rd, int64_t imm) { + // Initialize rd with an address + // Pointers are 48 bits + // 6 fixed instructions are generated + DCHECK_EQ((imm & 0xfff0000000000000ll), 0); + int64_t a6 = imm & 0x3f; // bits 0:5. 6 bits + int64_t b11 = (imm >> 6) & 0x7ff; // bits 6:11. 11 bits + int64_t high_31 = (imm >> 17) & 0x7fffffff; // 31 bits + int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits + int64_t low_12 = high_31 & 0xfff; // 12 bits + lui(rd, (int32_t)high_20); + addi(rd, rd, low_12); // 31 bits in rd. + slli(rd, rd, 11); // Space for next 11 bis + ori(rd, rd, b11); // 11 bits are put in. 42 bit in rd + slli(rd, rd, 6); // Space for next 6 bits + ori(rd, rd, a6); // 6 bits are put in. 48 bis in rd +} + +void Assembler::li_constant(Register rd, int64_t imm) { + DEBUG_PRINTF("li_constant(%d, %lx <%ld>)\n", ToNumber(rd), imm, imm); + lui(rd, (imm + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >> + 48); // Bits 63:48 + addiw(rd, rd, + (imm + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >> + 52); // Bits 47:36 + slli(rd, rd, 12); + addi(rd, rd, (imm + (1LL << 23) + (1LL << 11)) << 28 >> 52); // Bits 35:24 + slli(rd, rd, 12); + addi(rd, rd, (imm + (1LL << 11)) << 40 >> 52); // Bits 23:12 + slli(rd, rd, 12); + addi(rd, rd, imm << 52 >> 52); // Bits 11:0 +} + +// Break / Trap instructions. +void Assembler::break_(uint32_t code, bool break_as_stop) { + // We need to invalidate breaks that could be stops as well because the + // simulator expects a char pointer after the stop instruction. + // See constants-mips.h for explanation. + DCHECK( + (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) || + (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode))); + + // since ebreak does not allow additional immediate field, we use the + // immediate field of lui instruction immediately following the ebreak to + // encode the "code" info + ebreak(); + DCHECK(is_uint20(code)); + lui(zero_reg, code); +} + +void Assembler::stop(uint32_t code) { + DCHECK_GT(code, kMaxWatchpointCode); + DCHECK_LE(code, kMaxStopCode); +#if defined(V8_HOST_ARCH_RISCV64) + break_(0x54321); +#else // V8_HOST_ARCH_RISCV64 + break_(code, true); +#endif +} + +// Original MIPS Instructions + +// ------------Memory-instructions------------- + +bool Assembler::NeedAdjustBaseAndOffset(const MemOperand& src, + OffsetAccessType access_type, + int second_access_add_to_offset) { + bool two_accesses = static_cast(access_type); + DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7. + + // is_int12 must be passed a signed value, hence the static cast below. + if (is_int12(src.offset()) && + (!two_accesses || is_int12(static_cast( + src.offset() + second_access_add_to_offset)))) { + // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified + // value) fits into int12. + return false; + } + return true; +} + +void Assembler::AdjustBaseAndOffset(MemOperand* src, Register scratch, + OffsetAccessType access_type, + int second_Access_add_to_offset) { + // This method is used to adjust the base register and offset pair + // for a load/store when the offset doesn't fit into int12. 
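[Editorial illustration, not part of the upstream patch.] li_ptr above builds a 48-bit address with a fixed six-instruction sequence (lui, addi, slli 11, ori, slli 6, ori). The sketch below mirrors its field split and simulates the data flow of those instructions on the low 48 bits, checking that the original address is reproduced; the function name SimulateLiPtr is made up for illustration.

// Illustrative sketch only: round-trips li_ptr's a6/b11/high_31 decomposition.
#include <cassert>
#include <cstdint>

static int64_t SimulateLiPtr(int64_t imm) {    // imm: a 48-bit address
  int64_t a6 = imm & 0x3f;                     // bits 0..5  (6 bits)
  int64_t b11 = (imm >> 6) & 0x7ff;            // bits 6..16 (11 bits)
  int64_t high_31 = (imm >> 17) & 0x7fffffff;  // bits 17..47 (31 bits)
  int64_t high_20 = (high_31 + 0x800) >> 12;
  int64_t low_12 = high_31 & 0xfff;
  // Data flow of lui/addi/slli/ori/slli/ori, tracking the low 48 bits.
  int64_t rd = high_20 << 12;                            // lui
  rd += (low_12 >= 0x800) ? low_12 - 0x1000 : low_12;    // addi (sign-extends)
  rd = (rd << 11) | b11;                                 // slli 11; ori
  rd = (rd << 6) | a6;                                   // slli 6;  ori
  return rd;
}

int main() {
  const int64_t addrs[] = {0, 0x00007f1234567890, 0x0000555555554000};
  for (int64_t addr : addrs) assert(SimulateLiPtr(addr) == addr);
  return 0;
}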
+ + // Must not overwrite the register 'base' while loading 'offset'. + constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7F8; + constexpr int32_t kMaxOffsetForSimpleAdjustment = + 2 * kMinOffsetForSimpleAdjustment; + if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) { + addi(scratch, src->rm(), kMinOffsetForSimpleAdjustment); + src->offset_ -= kMinOffsetForSimpleAdjustment; + } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() && + src->offset() < 0) { + addi(scratch, src->rm(), -kMinOffsetForSimpleAdjustment); + src->offset_ += kMinOffsetForSimpleAdjustment; + } else if (access_type == OffsetAccessType::SINGLE_ACCESS) { + RV_li(scratch, (static_cast(src->offset()) + 0x800) >> 12 << 12); + add(scratch, scratch, src->rm()); + src->offset_ = src->offset() << 20 >> 20; + } else { + RV_li(scratch, src->offset()); + add(scratch, scratch, src->rm()); + src->offset_ = 0; + } + src->rm_ = scratch; +} + +int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc, + intptr_t pc_delta) { + if (RelocInfo::IsInternalReference(rmode)) { + int64_t* p = reinterpret_cast(pc); + if (*p == kEndOfJumpChain) { + return 0; // Number of instructions patched. + } + *p += pc_delta; + return 2; // Number of instructions patched. + } + Instr instr = instr_at(pc); + DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode)); + if (IsLui(instr)) { + uint64_t target_address = target_address_at(pc) + pc_delta; + DEBUG_PRINTF("target_address 0x%lx\n", target_address); + set_target_value_at(pc, target_address); + return 8; // Number of instructions patched. + } else { + UNIMPLEMENTED(); + } +} + +void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc, + intptr_t pc_delta) { + Instr instr = instr_at(pc); + Instr instr1 = instr_at(pc + 1 * kInstrSize); + DCHECK(RelocInfo::IsRelativeCodeTarget(rmode)); + if (IsAuipc(instr) && IsJalr(instr1)) { + int32_t imm; + imm = BrachlongOffset(instr, instr1); + imm -= pc_delta; + PatchBranchlongOffset(pc, instr, instr1, imm); + return; + } else { + UNREACHABLE(); + } +} + +void Assembler::GrowBuffer() { + DEBUG_PRINTF("GrowBuffer: %p -> ", buffer_start_); + // Compute new buffer size. + int old_size = buffer_->size(); + int new_size = std::min(2 * old_size, old_size + 1 * MB); + + // Some internal data structures overflow for very large buffers, + // they must ensure that kMaximalBufferSize is not too large. + if (new_size > kMaximalBufferSize) { + V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer"); + } + + // Set up new buffer. + std::unique_ptr new_buffer = buffer_->Grow(new_size); + DCHECK_EQ(new_size, new_buffer->size()); + byte* new_start = new_buffer->start(); + + // Copy the data. + intptr_t pc_delta = new_start - buffer_start_; + intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size); + size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos(); + MemMove(new_start, buffer_start_, pc_offset()); + MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), + reloc_size); + + // Switch buffers. + buffer_ = std::move(new_buffer); + buffer_start_ = new_start; + DEBUG_PRINTF("%p\n", buffer_start_); + pc_ += pc_delta; + reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, + reloc_info_writer.last_pc() + pc_delta); + + // Relocate runtime entries. 
+ base::Vector instructions{buffer_start_, + static_cast(pc_offset())}; + base::Vector reloc_info{reloc_info_writer.pos(), reloc_size}; + for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) { + RelocInfo::Mode rmode = it.rinfo()->rmode(); + if (rmode == RelocInfo::INTERNAL_REFERENCE) { + RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta); + } + } + + DCHECK(!overflow()); +} + +void Assembler::db(uint8_t data) { + if (!is_buffer_growth_blocked()) CheckBuffer(); + DEBUG_PRINTF("%p: constant 0x%x\n", pc_, data); + EmitHelper(data); +} + +void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) { + if (!RelocInfo::IsNoInfo(rmode)) { + DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || + RelocInfo::IsLiteralConstant(rmode)); + RecordRelocInfo(rmode); + } + if (!is_buffer_growth_blocked()) CheckBuffer(); + DEBUG_PRINTF("%p: constant 0x%x\n", pc_, data); + EmitHelper(data); +} + +void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) { + if (!RelocInfo::IsNoInfo(rmode)) { + DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || + RelocInfo::IsLiteralConstant(rmode)); + RecordRelocInfo(rmode); + } + if (!is_buffer_growth_blocked()) CheckBuffer(); + DEBUG_PRINTF("%p: constant 0x%lx\n", pc_, data); + EmitHelper(data); +} + +void Assembler::dd(Label* label) { + uint64_t data; + if (!is_buffer_growth_blocked()) CheckBuffer(); + if (label->is_bound()) { + data = reinterpret_cast(buffer_start_ + label->pos()); + } else { + data = jump_address(label); + internal_reference_positions_.insert(label->pos()); + } + RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); + EmitHelper(data); +} + +void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { + if (!ShouldRecordRelocInfo(rmode)) return; + // We do not try to reuse pool constants. + RelocInfo rinfo(reinterpret_cast
(pc_), rmode, data, Code()); + DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here. + reloc_info_writer.Write(&rinfo); +} + +void Assembler::BlockTrampolinePoolFor(int instructions) { + DEBUG_PRINTF("\tBlockTrampolinePoolFor %d", instructions); + CheckTrampolinePoolQuick(instructions); + DEBUG_PRINTF("\tpc_offset %d,BlockTrampolinePoolBefore %d\n", pc_offset(), + pc_offset() + instructions * kInstrSize); + BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); +} + +void Assembler::CheckTrampolinePool() { + // Some small sequences of instructions must not be broken up by the + // insertion of a trampoline pool; such sequences are protected by setting + // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, + // which are both checked here. Also, recursive calls to CheckTrampolinePool + // are blocked by trampoline_pool_blocked_nesting_. + DEBUG_PRINTF("\tpc_offset %d no_trampoline_pool_before:%d\n", pc_offset(), + no_trampoline_pool_before_); + DEBUG_PRINTF("\ttrampoline_pool_blocked_nesting:%d\n", + trampoline_pool_blocked_nesting_); + if ((trampoline_pool_blocked_nesting_ > 0) || + (pc_offset() < no_trampoline_pool_before_)) { + // Emission is currently blocked; make sure we try again as soon as + // possible. + if (trampoline_pool_blocked_nesting_ > 0) { + next_buffer_check_ = pc_offset() + kInstrSize; + } else { + next_buffer_check_ = no_trampoline_pool_before_; + } + return; + } + + DCHECK(!trampoline_emitted_); + DCHECK_GE(unbound_labels_count_, 0); + if (unbound_labels_count_ > 0) { + // First we emit jump, then we emit trampoline pool. + { + DEBUG_PRINTF("inserting trampoline pool at %p (%d)\n", + reinterpret_cast(buffer_start_ + pc_offset()), + pc_offset()); + BlockTrampolinePoolScope block_trampoline_pool(this); + Label after_pool; + j(&after_pool); + + int pool_start = pc_offset(); + for (int i = 0; i < unbound_labels_count_; i++) { + int64_t imm64; + imm64 = branch_long_offset(&after_pool); + CHECK(is_int32(imm64 + 0x800)); + int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12); + int32_t Lo12 = (int32_t)imm64 << 20 >> 20; + auipc(t6, Hi20); // Read PC + Hi20 into t6 + jr(t6, Lo12); // jump PC + Hi20 + Lo12 + } + // If unbound_labels_count_ is big enough, label after_pool will + // need a trampoline too, so we must create the trampoline before + // the bind operation to make sure function 'bind' can get this + // information. + trampoline_ = Trampoline(pool_start, unbound_labels_count_); + bind(&after_pool); + + trampoline_emitted_ = true; + // As we are only going to emit trampoline once, we need to prevent any + // further emission. + next_buffer_check_ = kMaxInt; + } + } else { + // Number of branches to unbound label at this point is zero, so we can + // move next buffer check to maximum. + next_buffer_check_ = + pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16; + } + return; +} + +void Assembler::set_target_address_at(Address pc, Address constant_pool, + Address target, + ICacheFlushMode icache_flush_mode) { + Instr* instr = reinterpret_cast(pc); + if (IsAuipc(*instr)) { + if (IsLd(*reinterpret_cast(pc + 4))) { + int32_t Hi20 = AuipcOffset(*instr); + int32_t Lo12 = LdOffset(*reinterpret_cast(pc + 4)); + Memory
(pc + Hi20 + Lo12) = target; + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + FlushInstructionCache(pc + Hi20 + Lo12, 2 * kInstrSize); + } + } else { + DCHECK(IsJalr(*reinterpret_cast(pc + 4))); + int64_t imm = (int64_t)target - (int64_t)pc; + Instr instr = instr_at(pc); + Instr instr1 = instr_at(pc + 1 * kInstrSize); + DCHECK(is_int32(imm + 0x800)); + int num = PatchBranchlongOffset(pc, instr, instr1, (int32_t)imm); + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + FlushInstructionCache(pc, num * kInstrSize); + } + } + } else { + set_target_address_at(pc, target, icache_flush_mode); + } +} + +Address Assembler::target_address_at(Address pc, Address constant_pool) { + Instr* instr = reinterpret_cast(pc); + if (IsAuipc(*instr)) { + if (IsLd(*reinterpret_cast(pc + 4))) { + int32_t Hi20 = AuipcOffset(*instr); + int32_t Lo12 = LdOffset(*reinterpret_cast(pc + 4)); + return Memory
(pc + Hi20 + Lo12); + } else { + DCHECK(IsJalr(*reinterpret_cast(pc + 4))); + int32_t Hi20 = AuipcOffset(*instr); + int32_t Lo12 = JalrOffset(*reinterpret_cast(pc + 4)); + return pc + Hi20 + Lo12; + } + + } else { + return target_address_at(pc); + } +} +Address Assembler::target_address_at(Address pc) { + DEBUG_PRINTF("target_address_at: pc: %lx\t", pc); + Instruction* instr0 = Instruction::At((unsigned char*)pc); + Instruction* instr1 = Instruction::At((unsigned char*)(pc + 1 * kInstrSize)); + Instruction* instr2 = Instruction::At((unsigned char*)(pc + 2 * kInstrSize)); + Instruction* instr3 = Instruction::At((unsigned char*)(pc + 3 * kInstrSize)); + Instruction* instr4 = Instruction::At((unsigned char*)(pc + 4 * kInstrSize)); + Instruction* instr5 = Instruction::At((unsigned char*)(pc + 5 * kInstrSize)); + + // Interpret instructions for address generated by li: See listing in + // Assembler::set_target_address_at() just below. + if (IsLui(*reinterpret_cast(instr0)) && + IsAddi(*reinterpret_cast(instr1)) && + IsSlli(*reinterpret_cast(instr2)) && + IsOri(*reinterpret_cast(instr3)) && + IsSlli(*reinterpret_cast(instr4)) && + IsOri(*reinterpret_cast(instr5))) { + // Assemble the 64 bit value. + int64_t addr = (int64_t)(instr0->Imm20UValue() << kImm20Shift) + + (int64_t)instr1->Imm12Value(); + addr <<= 11; + addr |= (int64_t)instr3->Imm12Value(); + addr <<= 6; + addr |= (int64_t)instr5->Imm12Value(); + + DEBUG_PRINTF("addr: %lx\n", addr); + return static_cast
(addr); + } + // We should never get here, force a bad address if we do. + UNREACHABLE(); +} +// On RISC-V, a 48-bit target address is stored in an 6-instruction sequence: +// lui(reg, (int32_t)high_20); // 19 high bits +// addi(reg, reg, low_12); // 12 following bits. total is 31 high bits in reg. +// slli(reg, reg, 11); // Space for next 11 bits +// ori(reg, reg, b11); // 11 bits are put in. 42 bit in reg +// slli(reg, reg, 6); // Space for next 6 bits +// ori(reg, reg, a6); // 6 bits are put in. all 48 bis in reg +// +// Patching the address must replace all instructions, and flush the i-cache. +// Note that this assumes the use of SV48, the 48-bit virtual memory system. +void Assembler::set_target_value_at(Address pc, uint64_t target, + ICacheFlushMode icache_flush_mode) { + DEBUG_PRINTF("set_target_value_at: pc: %lx\ttarget: %lx\n", pc, target); + uint32_t* p = reinterpret_cast(pc); + DCHECK_EQ((target & 0xffff000000000000ll), 0); +#ifdef DEBUG + // Check we have the result from a li macro-instruction. + Instruction* instr0 = Instruction::At((unsigned char*)pc); + Instruction* instr1 = Instruction::At((unsigned char*)(pc + 1 * kInstrSize)); + Instruction* instr3 = Instruction::At((unsigned char*)(pc + 3 * kInstrSize)); + Instruction* instr5 = Instruction::At((unsigned char*)(pc + 5 * kInstrSize)); + DCHECK(IsLui(*reinterpret_cast(instr0)) && + IsAddi(*reinterpret_cast(instr1)) && + IsOri(*reinterpret_cast(instr3)) && + IsOri(*reinterpret_cast(instr5))); +#endif + int64_t a6 = target & 0x3f; // bits 0:6. 6 bits + int64_t b11 = (target >> 6) & 0x7ff; // bits 6:11. 11 bits + int64_t high_31 = (target >> 17) & 0x7fffffff; // 31 bits + int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits + int64_t low_12 = high_31 & 0xfff; // 12 bits + *p = *p & 0xfff; + *p = *p | ((int32_t)high_20 << 12); + *(p + 1) = *(p + 1) & 0xfffff; + *(p + 1) = *(p + 1) | ((int32_t)low_12 << 20); + *(p + 2) = *(p + 2) & 0xfffff; + *(p + 2) = *(p + 2) | (11 << 20); + *(p + 3) = *(p + 3) & 0xfffff; + *(p + 3) = *(p + 3) | ((int32_t)b11 << 20); + *(p + 4) = *(p + 4) & 0xfffff; + *(p + 4) = *(p + 4) | (6 << 20); + *(p + 5) = *(p + 5) & 0xfffff; + *(p + 5) = *(p + 5) | ((int32_t)a6 << 20); + if (icache_flush_mode != SKIP_ICACHE_FLUSH) { + FlushInstructionCache(pc, 8 * kInstrSize); + } + DCHECK_EQ(target_address_at(pc), target); +} +UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler) + : available_(assembler->GetScratchRegisterList()), + old_available_(*available_) {} + +UseScratchRegisterScope::~UseScratchRegisterScope() { + *available_ = old_available_; +} + +Register UseScratchRegisterScope::Acquire() { + DCHECK_NOT_NULL(available_); + DCHECK(!available_->is_empty()); + int index = + static_cast(base::bits::CountTrailingZeros32(available_->bits())); + *available_ &= RegList::FromBits(~(1U << index)); + + return Register::from_code(index); +} + +bool UseScratchRegisterScope::hasAvailable() const { + return !available_->is_empty(); +} + +bool Assembler::IsConstantPoolAt(Instruction* instr) { + // The constant pool marker is made of two instructions. These instructions + // will never be emitted by the JIT, so checking for the first one is enough: + // 0: ld x0, x0, #offset + Instr instr_value = *reinterpret_cast(instr); + bool result = IsLd(instr_value) && (instr->Rs1Value() == kRegCode_zero_reg) && + (instr->RdValue() == kRegCode_zero_reg); +#ifdef DEBUG + // It is still worth asserting the marker is complete. 
+ // 1: j 0x0 + Instruction* instr_following = instr + kInstrSize; + DCHECK(!result || (IsJal(*reinterpret_cast(instr_following)) && + instr_following->Imm20JValue() == 0 && + instr_following->RdValue() == kRegCode_zero_reg)); +#endif + return result; +} + +int Assembler::ConstantPoolSizeAt(Instruction* instr) { + if (IsConstantPoolAt(instr)) { + return instr->Imm12Value(); + } else { + return -1; + } +} + +void Assembler::RecordConstPool(int size) { + // We only need this for debugger support, to correctly compute offsets in the + // code. + Assembler::BlockPoolsScope block_pools(this); + RecordRelocInfo(RelocInfo::CONST_POOL, static_cast(size)); +} + +void Assembler::EmitPoolGuard() { + // We must generate only one instruction as this is used in scopes that + // control the size of the code generated. + j(0); +} + +// Constant Pool + +void ConstantPool::EmitPrologue(Alignment require_alignment) { + // Recorded constant pool size is expressed in number of 32-bits words, + // and includes prologue and alignment, but not the jump around the pool + // and the size of the marker itself. + const int marker_size = 1; + int word_count = + ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size; + assm_->ld(zero_reg, zero_reg, word_count); + assm_->EmitPoolGuard(); +} + +int ConstantPool::PrologueSize(Jump require_jump) const { + // Prologue is: + // j over ;; if require_jump + // ld x0, x0, #pool_size + // j 0x0 + int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0; + prologue_size += 2 * kInstrSize; + return prologue_size; +} + +void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset, + Instruction* entry_offset, + const ConstantPoolKey& key) { + Instr instr_auipc = assm_->instr_at(load_offset); + Instr instr_ld = assm_->instr_at(load_offset + 4); + // Instruction to patch must be 'ld rd, offset(rd)' with 'offset == 0'. + DCHECK(assm_->IsAuipc(instr_auipc)); + DCHECK(assm_->IsLd(instr_ld)); + DCHECK_EQ(assm_->LdOffset(instr_ld), 0); + DCHECK_EQ(assm_->AuipcOffset(instr_auipc), 0); + int32_t distance = static_cast( + reinterpret_cast
(entry_offset) - + reinterpret_cast
(assm_->toAddress(load_offset))); + CHECK(is_int32(distance + 0x800)); + int32_t Hi20 = (((int32_t)distance + 0x800) >> 12); + int32_t Lo12 = (int32_t)distance << 20 >> 20; + assm_->instr_at_put(load_offset, SetAuipcOffset(Hi20, instr_auipc)); + assm_->instr_at_put(load_offset + 4, SetLdOffset(Lo12, instr_ld)); +} + +void ConstantPool::Check(Emission force_emit, Jump require_jump, + size_t margin) { + // Some short sequence of instruction must not be broken up by constant pool + // emission, such sequences are protected by a ConstPool::BlockScope. + if (IsBlocked()) { + // Something is wrong if emission is forced and blocked at the same time. + DCHECK_EQ(force_emit, Emission::kIfNeeded); + return; + } + + // We emit a constant pool only if : + // * it is not empty + // * emission is forced by parameter force_emit (e.g. at function end). + // * emission is mandatory or opportune according to {ShouldEmitNow}. + if (!IsEmpty() && (force_emit == Emission::kForced || + ShouldEmitNow(require_jump, margin))) { + // Emit veneers for branches that would go out of range during emission of + // the constant pool. + int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired); + + // Check that the code buffer is large enough before emitting the constant + // pool (this includes the gap to the relocation information). + int needed_space = worst_case_size + assm_->kGap; + while (assm_->buffer_space() <= needed_space) { + assm_->GrowBuffer(); + } + + EmitAndClear(require_jump); + } + // Since a constant pool is (now) empty, move the check offset forward by + // the standard interval. + SetNextCheckIn(ConstantPool::kCheckInterval); +} + +LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep, + uint8_t laneidx) { + switch (rep) { + case MachineRepresentation::kWord8: + *this = LoadStoreLaneParams(laneidx, 8, kRvvVLEN / 16); + break; + case MachineRepresentation::kWord16: + *this = LoadStoreLaneParams(laneidx, 16, kRvvVLEN / 8); + break; + case MachineRepresentation::kWord32: + *this = LoadStoreLaneParams(laneidx, 32, kRvvVLEN / 4); + break; + case MachineRepresentation::kWord64: + *this = LoadStoreLaneParams(laneidx, 64, kRvvVLEN / 2); + break; + default: + UNREACHABLE(); + } +} + +// Pool entries are accessed with pc relative load therefore this cannot be more +// than 1 * MB. Since constant pool emission checks are interval based, and we +// want to keep entries close to the code, we try to emit every 64KB. +const size_t ConstantPool::kMaxDistToPool32 = 1 * MB; +const size_t ConstantPool::kMaxDistToPool64 = 1 * MB; +const size_t ConstantPool::kCheckInterval = 128 * kInstrSize; +const size_t ConstantPool::kApproxDistToPool32 = 64 * KB; +const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32; + +const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB; +const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB; +const size_t ConstantPool::kApproxMaxEntryCount = 512; + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_RISCV64 diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h new file mode 100644 index 00000000000000..2b0d262369c8f3 --- /dev/null +++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h @@ -0,0 +1,1829 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2021 the V8 project authors. All rights reserved. + +#ifndef V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_H_ +#define V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_H_ + +#include + +#include +#include + +#include "src/codegen/assembler.h" +#include "src/codegen/constant-pool.h" +#include "src/codegen/external-reference.h" +#include "src/codegen/label.h" +#include "src/codegen/riscv64/constants-riscv64.h" +#include "src/codegen/riscv64/register-riscv64.h" +#include "src/objects/contexts.h" +#include "src/objects/smi.h" + +namespace v8 { +namespace internal { + +#define DEBUG_PRINTF(...) \ + if (FLAG_riscv_debug) { \ + printf(__VA_ARGS__); \ + } + +class SafepointTableBuilder; + +// ----------------------------------------------------------------------------- +// Machine instruction Operands. +constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize; +constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1; +// Class Operand represents a shifter operand in data processing instructions. +class Operand { + public: + // Immediate. + V8_INLINE explicit Operand(int64_t immediate, + RelocInfo::Mode rmode = RelocInfo::NO_INFO) + : rm_(no_reg), rmode_(rmode) { + value_.immediate = immediate; + } + V8_INLINE explicit Operand(const ExternalReference& f) + : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) { + value_.immediate = static_cast(f.address()); + } + V8_INLINE explicit Operand(const char* s); + explicit Operand(Handle handle); + V8_INLINE explicit Operand(Smi value) + : rm_(no_reg), rmode_(RelocInfo::NO_INFO) { + value_.immediate = static_cast(value.ptr()); + } + + static Operand EmbeddedNumber(double number); // Smi or HeapNumber. + static Operand EmbeddedStringConstant(const StringConstantBase* str); + + // Register. + V8_INLINE explicit Operand(Register rm) : rm_(rm) {} + + // Return true if this is a register operand. 
+ V8_INLINE bool is_reg() const; + + inline int64_t immediate() const; + + bool IsImmediate() const { return !rm_.is_valid(); } + + HeapObjectRequest heap_object_request() const { + DCHECK(IsHeapObjectRequest()); + return value_.heap_object_request; + } + + bool IsHeapObjectRequest() const { + DCHECK_IMPLIES(is_heap_object_request_, IsImmediate()); + DCHECK_IMPLIES(is_heap_object_request_, + rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT || + rmode_ == RelocInfo::CODE_TARGET); + return is_heap_object_request_; + } + + Register rm() const { return rm_; } + + RelocInfo::Mode rmode() const { return rmode_; } + + private: + Register rm_; + union Value { + Value() {} + HeapObjectRequest heap_object_request; // if is_heap_object_request_ + int64_t immediate; // otherwise + } value_; // valid if rm_ == no_reg + bool is_heap_object_request_ = false; + RelocInfo::Mode rmode_; + + friend class Assembler; + friend class MacroAssembler; +}; + +// On RISC-V we have only one addressing mode with base_reg + offset. +// Class MemOperand represents a memory operand in load and store instructions. +class V8_EXPORT_PRIVATE MemOperand : public Operand { + public: + // Immediate value attached to offset. + enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 }; + + explicit MemOperand(Register rn, int32_t offset = 0); + explicit MemOperand(Register rn, int32_t unit, int32_t multiplier, + OffsetAddend offset_addend = offset_zero); + int32_t offset() const { return offset_; } + + bool OffsetIsInt12Encodable() const { return is_int12(offset_); } + + private: + int32_t offset_; + + friend class Assembler; +}; + +class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { + public: + // Create an assembler. Instructions and relocation information are emitted + // into a buffer, with the instructions starting from the beginning and the + // relocation information starting from the end of the buffer. See CodeDesc + // for a detailed comment on the layout (globals.h). + // + // If the provided buffer is nullptr, the assembler allocates and grows its + // own buffer. Otherwise it takes ownership of the provided buffer. + explicit Assembler(const AssemblerOptions&, + std::unique_ptr = {}); + + virtual ~Assembler(); + void AbortedCodeGeneration(); + // GetCode emits any pending (non-emitted) code and fills the descriptor desc. + static constexpr int kNoHandlerTable = 0; + static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr; + void GetCode(Isolate* isolate, CodeDesc* desc, + SafepointTableBuilder* safepoint_table_builder, + int handler_table_offset); + + // Convenience wrapper for code without safepoint or handler tables. + void GetCode(Isolate* isolate, CodeDesc* desc) { + GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable); + } + + // Unused on this architecture. + void MaybeEmitOutOfLineConstantPool() {} + + // Label operations & relative jumps (PPUM Appendix D). + // + // Takes a branch opcode (cc) and a label (L) and generates + // either a backward branch or a forward branch and links it + // to the label fixup chain. Usage: + // + // Label L; // unbound label + // j(cc, &L); // forward branch to unbound label + // bind(&L); // bind label to the current pc + // j(cc, &L); // backward branch to bound label + // bind(&L); // illegal: a label may be bound only once + // + // Note: The same Label can be used for forward and backward branches + // but it may be bound only once. + void bind(Label* L); // Binds an unbound label L to current code position. 
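[Editorial illustration, not part of the upstream patch.] The comment above describes the label pattern: branch forward to an unbound Label, then bind it once. A small sketch using the bind/bge/sub declarations from this header is shown below; the surrounding function, and the assumption that a0/a1/zero_reg come from register-riscv64.h, are illustrative only, and this fragment only compiles inside the V8 tree.

// Illustrative sketch only: forward branch to an unbound label, then bind it.
#include "src/codegen/riscv64/assembler-riscv64.h"

namespace v8 {
namespace internal {

void EmitAbsDiff(Assembler* assm) {    // hypothetical helper: a0 = |a0 - a1|
  Label done;                          // unbound label
  assm->sub(a0, a0, a1);               // a0 = a0 - a1
  assm->bge(a0, zero_reg, &done);      // forward branch to the unbound label
  assm->sub(a0, zero_reg, a0);         // negate when the difference was < 0
  assm->bind(&done);                   // bind the label to the current pc
}

}  // namespace internal
}  // namespace v8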
+ + enum OffsetSize : int { + kOffset21 = 21, // RISCV jal + kOffset12 = 12, // RISCV imm12 + kOffset20 = 20, // RISCV imm20 + kOffset13 = 13, // RISCV branch + kOffset32 = 32, // RISCV auipc + instr_I + kOffset11 = 11, // RISCV C_J + kOffset8 = 8 // RISCV compressed branch + }; + + // Determines if Label is bound and near enough so that branch instruction + // can be used to reach it, instead of jump instruction. + bool is_near(Label* L); + bool is_near(Label* L, OffsetSize bits); + bool is_near_branch(Label* L); + + // Get offset from instr. + int BranchOffset(Instr instr); + static int BrachlongOffset(Instr auipc, Instr jalr); + static int PatchBranchlongOffset(Address pc, Instr auipc, Instr instr_I, + int32_t offset); + int JumpOffset(Instr instr); + int CJumpOffset(Instr instr); + int CBranchOffset(Instr instr); + static int LdOffset(Instr instr); + static int AuipcOffset(Instr instr); + static int JalrOffset(Instr instr); + + // Returns the branch offset to the given label from the current code + // position. Links the label to the current position if it is still unbound. + // Manages the jump elimination optimization if the second parameter is true. + int32_t branch_offset_helper(Label* L, OffsetSize bits); + inline int32_t branch_offset(Label* L) { + return branch_offset_helper(L, OffsetSize::kOffset13); + } + inline int32_t jump_offset(Label* L) { + return branch_offset_helper(L, OffsetSize::kOffset21); + } + inline int16_t cjump_offset(Label* L) { + return (int16_t)branch_offset_helper(L, OffsetSize::kOffset11); + } + inline int32_t cbranch_offset(Label* L) { + return branch_offset_helper(L, OffsetSize::kOffset8); + } + + uint64_t jump_address(Label* L); + uint64_t branch_long_offset(Label* L); + + // Puts a labels target address at the given position. + // The high 8 bits are set to zero. + void label_at_put(Label* L, int at_offset); + + // Read/Modify the code target address in the branch/call instruction at pc. + // The isolate argument is unused (and may be nullptr) when skipping flushing. + static Address target_address_at(Address pc); + V8_INLINE static void set_target_address_at( + Address pc, Address target, + ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) { + set_target_value_at(pc, target, icache_flush_mode); + } + + static Address target_address_at(Address pc, Address constant_pool); + + static void set_target_address_at( + Address pc, Address constant_pool, Address target, + ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); + + // Read/Modify the code target address in the branch/call instruction at pc. + inline static Tagged_t target_compressed_address_at(Address pc, + Address constant_pool); + inline static void set_target_compressed_address_at( + Address pc, Address constant_pool, Tagged_t target, + ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); + + inline Handle code_target_object_handle_at(Address pc, + Address constant_pool); + inline Handle compressed_embedded_object_handle_at( + Address pc, Address constant_pool); + + static bool IsConstantPoolAt(Instruction* instr); + static int ConstantPoolSizeAt(Instruction* instr); + // See Assembler::CheckConstPool for more info. + void EmitPoolGuard(); + + static void set_target_value_at( + Address pc, uint64_t target, + ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); + + static void JumpLabelToJumpRegister(Address pc); + + // This sets the branch destination (which gets loaded at the call address). + // This is for calls and branches within generated code. 
The serializer + has already deserialized the lui/ori instructions etc. + inline static void deserialization_set_special_target_at( + Address instruction_payload, Code code, Address target); + + // Get the size of the special target encoded at 'instruction_payload'. + inline static int deserialization_special_target_size( + Address instruction_payload); + + // This sets the internal reference at the pc. + inline static void deserialization_set_target_internal_reference_at( + Address pc, Address target, + RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE); + + // Difference between address of current opcode and target address offset. + static constexpr int kBranchPCOffset = kInstrSize; + + // Difference between address of current opcode and target address offset, + // when we are generating a sequence of instructions for long relative PC + // branches + static constexpr int kLongBranchPCOffset = 3 * kInstrSize; + + // Adjust ra register in branch delay slot of bal instruction so as to skip + // instructions not needed after optimization of PIC in + // TurboAssembler::BranchAndLink method. + + static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize; + + // Here we are patching the address in the LUI/ADDI instruction pair. + // These values are used in the serialization process and must be zero for + // RISC-V platform, as Code, Embedded Object or External-reference pointers + // are split across two consecutive instructions and don't exist separately + // in the code, so the serializer should not step forwards in memory after + // a target is resolved and written. + static constexpr int kSpecialTargetSize = 0; + + // Number of consecutive instructions used to store 32bit/64bit constant. + // This constant was used in RelocInfo::target_address_address() function + // to tell serializer address of the instruction that follows + // LUI/ADDI instruction pair. + static constexpr int kInstructionsFor32BitConstant = 2; + static constexpr int kInstructionsFor64BitConstant = 8; + + // Difference between address of current opcode and value read from pc + // register. + static constexpr int kPcLoadDelta = 4; + + // Bits available for offset field in branches + static constexpr int kBranchOffsetBits = 13; + + // Bits available for offset field in jump + static constexpr int kJumpOffsetBits = 21; + + // Bits available for offset field in compressed jump + static constexpr int kCJalOffsetBits = 12; + + // Bits available for offset field in compressed branch + static constexpr int kCBranchOffsetBits = 9; + + // Max offset for b instructions with 12-bit offset field (multiple of 2) + static constexpr int kMaxBranchOffset = (1 << (13 - 1)) - 1; + + // Max offset for jal instruction with 20-bit offset field (multiple of 2) + static constexpr int kMaxJumpOffset = (1 << (21 - 1)) - 1; + + static constexpr int kTrampolineSlotsSize = 2 * kInstrSize; + + RegList* GetScratchRegisterList() { return &scratch_register_list_; } + + // --------------------------------------------------------------------------- + // Code generation. + + // Insert the smallest number of nop instructions + // possible to align the pc offset to a multiple + // of m. m must be a power of 2 (>= 4). + void Align(int m); + // Insert the smallest number of zero bytes possible to align the pc offset + // to a multiple of m. m must be a power of 2 (>= 2). + void DataAlign(int m); + // Aligns code to something that's optimal for a jump target for the platform.
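[Editorial illustration, not part of the upstream patch.] The constants above imply the reachability limits used by is_near(): a conditional branch has a signed 13-bit, even offset (roughly +-4 KB) and a jal has a signed 21-bit, even offset (roughly +-1 MB). A minimal sketch of those range checks follows; the helper names are made up, while V8 itself uses is_near()/is_intN().

// Illustrative sketch only: branch vs. jump offset ranges on RISC-V.
#include <cassert>
#include <cstdint>

static bool FitsBranchOffset(int64_t offset) {  // beq/bne/blt/bge/...
  return (offset & 1) == 0 && offset >= -(1 << 12) && offset <= (1 << 12) - 1;
}

static bool FitsJumpOffset(int64_t offset) {    // jal
  return (offset & 1) == 0 && offset >= -(1 << 20) && offset <= (1 << 20) - 1;
}

int main() {
  assert(FitsBranchOffset(4094) && !FitsBranchOffset(4096));
  assert(FitsJumpOffset(1 << 19) && !FitsJumpOffset(1 << 20));
  return 0;
}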
+ void CodeTargetAlign(); + void LoopHeaderAlign() { CodeTargetAlign(); } + + // Different nop operations are used by the code generator to detect certain + // states of the generated code. + enum NopMarkerTypes { + NON_MARKING_NOP = 0, + DEBUG_BREAK_NOP, + // IC markers. + PROPERTY_ACCESS_INLINED, + PROPERTY_ACCESS_INLINED_CONTEXT, + PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, + // Helper values. + LAST_CODE_MARKER, + FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, + }; + + // RISC-V Instructions Emited to a buffer + + void lui(Register rd, int32_t imm20); + void auipc(Register rd, int32_t imm20); + + // Jumps + void jal(Register rd, int32_t imm20); + void jalr(Register rd, Register rs1, int16_t imm12); + + // Branches + void beq(Register rs1, Register rs2, int16_t imm12); + inline void beq(Register rs1, Register rs2, Label* L) { + beq(rs1, rs2, branch_offset(L)); + } + void bne(Register rs1, Register rs2, int16_t imm12); + inline void bne(Register rs1, Register rs2, Label* L) { + bne(rs1, rs2, branch_offset(L)); + } + void blt(Register rs1, Register rs2, int16_t imm12); + inline void blt(Register rs1, Register rs2, Label* L) { + blt(rs1, rs2, branch_offset(L)); + } + void bge(Register rs1, Register rs2, int16_t imm12); + inline void bge(Register rs1, Register rs2, Label* L) { + bge(rs1, rs2, branch_offset(L)); + } + void bltu(Register rs1, Register rs2, int16_t imm12); + inline void bltu(Register rs1, Register rs2, Label* L) { + bltu(rs1, rs2, branch_offset(L)); + } + void bgeu(Register rs1, Register rs2, int16_t imm12); + inline void bgeu(Register rs1, Register rs2, Label* L) { + bgeu(rs1, rs2, branch_offset(L)); + } + + // Loads + void lb(Register rd, Register rs1, int16_t imm12); + void lh(Register rd, Register rs1, int16_t imm12); + void lw(Register rd, Register rs1, int16_t imm12); + void lbu(Register rd, Register rs1, int16_t imm12); + void lhu(Register rd, Register rs1, int16_t imm12); + + // Stores + void sb(Register source, Register base, int16_t imm12); + void sh(Register source, Register base, int16_t imm12); + void sw(Register source, Register base, int16_t imm12); + + // Arithmetic with immediate + void addi(Register rd, Register rs1, int16_t imm12); + void slti(Register rd, Register rs1, int16_t imm12); + void sltiu(Register rd, Register rs1, int16_t imm12); + void xori(Register rd, Register rs1, int16_t imm12); + void ori(Register rd, Register rs1, int16_t imm12); + void andi(Register rd, Register rs1, int16_t imm12); + void slli(Register rd, Register rs1, uint8_t shamt); + void srli(Register rd, Register rs1, uint8_t shamt); + void srai(Register rd, Register rs1, uint8_t shamt); + + // Arithmetic + void add(Register rd, Register rs1, Register rs2); + void sub(Register rd, Register rs1, Register rs2); + void sll(Register rd, Register rs1, Register rs2); + void slt(Register rd, Register rs1, Register rs2); + void sltu(Register rd, Register rs1, Register rs2); + void xor_(Register rd, Register rs1, Register rs2); + void srl(Register rd, Register rs1, Register rs2); + void sra(Register rd, Register rs1, Register rs2); + void or_(Register rd, Register rs1, Register rs2); + void and_(Register rd, Register rs1, Register rs2); + + // Memory fences + void fence(uint8_t pred, uint8_t succ); + void fence_tso(); + + // Environment call / break + void ecall(); + void ebreak(); + + // This is a de facto standard (as set by GNU binutils) 32-bit unimplemented + // instruction (i.e., it should always trap, if your implementation has + // invalid instruction traps). 
+ void unimp(); + + // CSR + void csrrw(Register rd, ControlStatusReg csr, Register rs1); + void csrrs(Register rd, ControlStatusReg csr, Register rs1); + void csrrc(Register rd, ControlStatusReg csr, Register rs1); + void csrrwi(Register rd, ControlStatusReg csr, uint8_t imm5); + void csrrsi(Register rd, ControlStatusReg csr, uint8_t imm5); + void csrrci(Register rd, ControlStatusReg csr, uint8_t imm5); + + // RV64I + void lwu(Register rd, Register rs1, int16_t imm12); + void ld(Register rd, Register rs1, int16_t imm12); + void sd(Register source, Register base, int16_t imm12); + void addiw(Register rd, Register rs1, int16_t imm12); + void slliw(Register rd, Register rs1, uint8_t shamt); + void srliw(Register rd, Register rs1, uint8_t shamt); + void sraiw(Register rd, Register rs1, uint8_t shamt); + void addw(Register rd, Register rs1, Register rs2); + void subw(Register rd, Register rs1, Register rs2); + void sllw(Register rd, Register rs1, Register rs2); + void srlw(Register rd, Register rs1, Register rs2); + void sraw(Register rd, Register rs1, Register rs2); + + // RV32M Standard Extension + void mul(Register rd, Register rs1, Register rs2); + void mulh(Register rd, Register rs1, Register rs2); + void mulhsu(Register rd, Register rs1, Register rs2); + void mulhu(Register rd, Register rs1, Register rs2); + void div(Register rd, Register rs1, Register rs2); + void divu(Register rd, Register rs1, Register rs2); + void rem(Register rd, Register rs1, Register rs2); + void remu(Register rd, Register rs1, Register rs2); + + // RV64M Standard Extension (in addition to RV32M) + void mulw(Register rd, Register rs1, Register rs2); + void divw(Register rd, Register rs1, Register rs2); + void divuw(Register rd, Register rs1, Register rs2); + void remw(Register rd, Register rs1, Register rs2); + void remuw(Register rd, Register rs1, Register rs2); + + // RV32A Standard Extension + void lr_w(bool aq, bool rl, Register rd, Register rs1); + void sc_w(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amoswap_w(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amoadd_w(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amoxor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amoand_w(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amoor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amomin_w(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amomax_w(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amominu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amomaxu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2); + + // RV64A Standard Extension (in addition to RV32A) + void lr_d(bool aq, bool rl, Register rd, Register rs1); + void sc_d(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amoswap_d(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amoadd_d(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amoxor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amoand_d(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amoor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amomin_d(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amomax_d(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amominu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2); + void amomaxu_d(bool aq, bool rl, 
Register rd, Register rs1, Register rs2); + + // RV32F Standard Extension + void flw(FPURegister rd, Register rs1, int16_t imm12); + void fsw(FPURegister source, Register base, int16_t imm12); + void fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm = RNE); + void fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm = RNE); + void fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm = RNE); + void fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm = RNE); + void fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm = RNE); + void fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm = RNE); + void fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm = RNE); + void fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm = RNE); + void fsqrt_s(FPURegister rd, FPURegister rs1, RoundingMode frm = RNE); + void fsgnj_s(FPURegister rd, FPURegister rs1, FPURegister rs2); + void fsgnjn_s(FPURegister rd, FPURegister rs1, FPURegister rs2); + void fsgnjx_s(FPURegister rd, FPURegister rs1, FPURegister rs2); + void fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2); + void fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2); + void fcvt_w_s(Register rd, FPURegister rs1, RoundingMode frm = RNE); + void fcvt_wu_s(Register rd, FPURegister rs1, RoundingMode frm = RNE); + void fmv_x_w(Register rd, FPURegister rs1); + void feq_s(Register rd, FPURegister rs1, FPURegister rs2); + void flt_s(Register rd, FPURegister rs1, FPURegister rs2); + void fle_s(Register rd, FPURegister rs1, FPURegister rs2); + void fclass_s(Register rd, FPURegister rs1); + void fcvt_s_w(FPURegister rd, Register rs1, RoundingMode frm = RNE); + void fcvt_s_wu(FPURegister rd, Register rs1, RoundingMode frm = RNE); + void fmv_w_x(FPURegister rd, Register rs1); + + // RV64F Standard Extension (in addition to RV32F) + void fcvt_l_s(Register rd, FPURegister rs1, RoundingMode frm = RNE); + void fcvt_lu_s(Register rd, FPURegister rs1, RoundingMode frm = RNE); + void fcvt_s_l(FPURegister rd, Register rs1, RoundingMode frm = RNE); + void fcvt_s_lu(FPURegister rd, Register rs1, RoundingMode frm = RNE); + + // RV32D Standard Extension + void fld(FPURegister rd, Register rs1, int16_t imm12); + void fsd(FPURegister source, Register base, int16_t imm12); + void fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm = RNE); + void fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm = RNE); + void fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm = RNE); + void fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + FPURegister rs3, RoundingMode frm = RNE); + void fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm = RNE); + void fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm = RNE); + void fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm = RNE); + void fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2, + RoundingMode frm = RNE); + void fsqrt_d(FPURegister rd, FPURegister rs1, RoundingMode frm = RNE); + void fsgnj_d(FPURegister rd, FPURegister rs1, FPURegister rs2); + void fsgnjn_d(FPURegister rd, FPURegister rs1, FPURegister rs2); + void fsgnjx_d(FPURegister rd, FPURegister rs1, 
FPURegister rs2); + void fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2); + void fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2); + void fcvt_s_d(FPURegister rd, FPURegister rs1, RoundingMode frm = RNE); + void fcvt_d_s(FPURegister rd, FPURegister rs1, RoundingMode frm = RNE); + void feq_d(Register rd, FPURegister rs1, FPURegister rs2); + void flt_d(Register rd, FPURegister rs1, FPURegister rs2); + void fle_d(Register rd, FPURegister rs1, FPURegister rs2); + void fclass_d(Register rd, FPURegister rs1); + void fcvt_w_d(Register rd, FPURegister rs1, RoundingMode frm = RNE); + void fcvt_wu_d(Register rd, FPURegister rs1, RoundingMode frm = RNE); + void fcvt_d_w(FPURegister rd, Register rs1, RoundingMode frm = RNE); + void fcvt_d_wu(FPURegister rd, Register rs1, RoundingMode frm = RNE); + + // RV64D Standard Extension (in addition to RV32D) + void fcvt_l_d(Register rd, FPURegister rs1, RoundingMode frm = RNE); + void fcvt_lu_d(Register rd, FPURegister rs1, RoundingMode frm = RNE); + void fmv_x_d(Register rd, FPURegister rs1); + void fcvt_d_l(FPURegister rd, Register rs1, RoundingMode frm = RNE); + void fcvt_d_lu(FPURegister rd, Register rs1, RoundingMode frm = RNE); + void fmv_d_x(FPURegister rd, Register rs1); + + // RV64C Standard Extension + void c_nop(); + void c_addi(Register rd, int8_t imm6); + void c_addiw(Register rd, int8_t imm6); + void c_addi16sp(int16_t imm10); + void c_addi4spn(Register rd, int16_t uimm10); + void c_li(Register rd, int8_t imm6); + void c_lui(Register rd, int8_t imm6); + void c_slli(Register rd, uint8_t shamt6); + void c_fldsp(FPURegister rd, uint16_t uimm9); + void c_lwsp(Register rd, uint16_t uimm8); + void c_ldsp(Register rd, uint16_t uimm9); + void c_jr(Register rs1); + void c_mv(Register rd, Register rs2); + void c_ebreak(); + void c_jalr(Register rs1); + void c_j(int16_t imm12); + inline void c_j(Label* L) { c_j(cjump_offset(L)); } + void c_add(Register rd, Register rs2); + void c_sub(Register rd, Register rs2); + void c_and(Register rd, Register rs2); + void c_xor(Register rd, Register rs2); + void c_or(Register rd, Register rs2); + void c_subw(Register rd, Register rs2); + void c_addw(Register rd, Register rs2); + void c_swsp(Register rs2, uint16_t uimm8); + void c_sdsp(Register rs2, uint16_t uimm9); + void c_fsdsp(FPURegister rs2, uint16_t uimm9); + void c_lw(Register rd, Register rs1, uint16_t uimm7); + void c_ld(Register rd, Register rs1, uint16_t uimm8); + void c_fld(FPURegister rd, Register rs1, uint16_t uimm8); + void c_sw(Register rs2, Register rs1, uint16_t uimm7); + void c_sd(Register rs2, Register rs1, uint16_t uimm8); + void c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8); + void c_bnez(Register rs1, int16_t imm9); + inline void c_bnez(Register rs1, Label* L) { c_bnez(rs1, branch_offset(L)); } + void c_beqz(Register rs1, int16_t imm9); + inline void c_beqz(Register rs1, Label* L) { c_beqz(rs1, branch_offset(L)); } + void c_srli(Register rs1, int8_t shamt6); + void c_srai(Register rs1, int8_t shamt6); + void c_andi(Register rs1, int8_t imm6); + void NOP(); + void EBREAK(); + + // RVV + static int32_t GenZimm(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu, + MaskAgnosticType mask = mu) { + return (mask << 7) | (tail << 6) | ((vsew & 0x7) << 3) | (vlmul & 0x7); + } + + void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew, + MaskType mask = NoMask); + void vls(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask = NoMask); + void vlx(VRegister vd, Register rs1, VRegister vs3, VSew vsew, + MaskType 
mask = NoMask); + + void vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew, + MaskType mask = NoMask); + void vss(VRegister vd, Register rs1, Register rs2, VSew vsew, + MaskType mask = NoMask); + void vsx(VRegister vd, Register rs1, VRegister vs3, VSew vsew, + MaskType mask = NoMask); + + void vsu(VRegister vd, Register rs1, VRegister vs3, VSew vsew, + MaskType mask = NoMask); + +#define SegInstr(OP) \ + void OP##seg2(ARG); \ + void OP##seg3(ARG); \ + void OP##seg4(ARG); \ + void OP##seg5(ARG); \ + void OP##seg6(ARG); \ + void OP##seg7(ARG); \ + void OP##seg8(ARG); + +#define ARG \ + VRegister vd, Register rs1, uint8_t lumop, VSew vsew, MaskType mask = NoMask + + SegInstr(vl) SegInstr(vs) +#undef ARG + +#define ARG \ + VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask = NoMask + + SegInstr(vls) SegInstr(vss) +#undef ARG + +#define ARG \ + VRegister vd, Register rs1, VRegister rs2, VSew vsew, MaskType mask = NoMask + + SegInstr(vsx) SegInstr(vlx) +#undef ARG +#undef SegInstr + + // RVV Vector Arithmetic Instruction + + void vmv_vv(VRegister vd, VRegister vs1); + void vmv_vx(VRegister vd, Register rs1); + void vmv_vi(VRegister vd, uint8_t simm5); + void vmv_xs(Register rd, VRegister vs2); + void vmv_sx(VRegister vd, Register rs1); + void vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2); + void vmerge_vx(VRegister vd, Register rs1, VRegister vs2); + void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2); + + void vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1, + MaskType mask = NoMask); + void vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1, + MaskType mask = NoMask); + void vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1, + MaskType mask = NoMask); + void vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1, + MaskType mask = NoMask); + + void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2); + void vadc_vx(VRegister vd, Register rs1, VRegister vs2); + void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2); + + void vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2); + void vmadc_vx(VRegister vd, Register rs1, VRegister vs2); + void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2); + + void vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask = NoMask); + void vfmv_fs(FPURegister fd, VRegister vs2); + void vfmv_sf(VRegister vd, FPURegister fs); + + void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1, + MaskType mask = NoMask); + void vid_v(VRegister vd, MaskType mask = Mask); + +#define DEFINE_OPIVV(name, funct6) \ + void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \ + MaskType mask = NoMask); + +#define DEFINE_OPIVX(name, funct6) \ + void name##_vx(VRegister vd, VRegister vs2, Register rs1, \ + MaskType mask = NoMask); + +#define DEFINE_OPIVI(name, funct6) \ + void name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \ + MaskType mask = NoMask); + +#define DEFINE_OPMVV(name, funct6) \ + void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \ + MaskType mask = NoMask); + +#define DEFINE_OPMVX(name, funct6) \ + void name##_vx(VRegister vd, VRegister vs2, Register rs1, \ + MaskType mask = NoMask); + +#define DEFINE_OPFVV(name, funct6) \ + void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \ + MaskType mask = NoMask); + +#define DEFINE_OPFWV(name, funct6) \ + void name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \ + MaskType mask = NoMask); + +#define DEFINE_OPFRED(name, funct6) \ + void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \ + MaskType mask = NoMask); + +#define DEFINE_OPFVF(name, 
funct6) \ + void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \ + MaskType mask = NoMask); + +#define DEFINE_OPFWF(name, funct6) \ + void name##_wf(VRegister vd, VRegister vs2, FPURegister fs1, \ + MaskType mask = NoMask); + +#define DEFINE_OPFVV_FMA(name, funct6) \ + void name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \ + MaskType mask = NoMask); + +#define DEFINE_OPFVF_FMA(name, funct6) \ + void name##_vf(VRegister vd, FPURegister fs1, VRegister vs2, \ + MaskType mask = NoMask); + +#define DEFINE_OPMVV_VIE(name) \ + void name(VRegister vd, VRegister vs2, MaskType mask = NoMask); + + DEFINE_OPIVV(vadd, VADD_FUNCT6) + DEFINE_OPIVX(vadd, VADD_FUNCT6) + DEFINE_OPIVI(vadd, VADD_FUNCT6) + DEFINE_OPIVV(vsub, VSUB_FUNCT6) + DEFINE_OPIVX(vsub, VSUB_FUNCT6) + DEFINE_OPMVX(vdiv, VDIV_FUNCT6) + DEFINE_OPMVX(vdivu, VDIVU_FUNCT6) + DEFINE_OPMVX(vmul, VMUL_FUNCT6) + DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6) + DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6) + DEFINE_OPMVX(vmulh, VMULH_FUNCT6) + DEFINE_OPMVV(vdiv, VDIV_FUNCT6) + DEFINE_OPMVV(vdivu, VDIVU_FUNCT6) + DEFINE_OPMVV(vmul, VMUL_FUNCT6) + DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6) + DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6) + DEFINE_OPMVV(vmulh, VMULH_FUNCT6) + DEFINE_OPMVV(vwmul, VWMUL_FUNCT6) + DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6) + DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6) + DEFINE_OPMVV(vwadd, VWADD_FUNCT6) + DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6) + DEFINE_OPIVX(vsadd, VSADD_FUNCT6) + DEFINE_OPIVV(vsadd, VSADD_FUNCT6) + DEFINE_OPIVI(vsadd, VSADD_FUNCT6) + DEFINE_OPIVX(vsaddu, VSADD_FUNCT6) + DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6) + DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6) + DEFINE_OPIVX(vssub, VSSUB_FUNCT6) + DEFINE_OPIVV(vssub, VSSUB_FUNCT6) + DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6) + DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6) + DEFINE_OPIVX(vrsub, VRSUB_FUNCT6) + DEFINE_OPIVI(vrsub, VRSUB_FUNCT6) + DEFINE_OPIVV(vminu, VMINU_FUNCT6) + DEFINE_OPIVX(vminu, VMINU_FUNCT6) + DEFINE_OPIVV(vmin, VMIN_FUNCT6) + DEFINE_OPIVX(vmin, VMIN_FUNCT6) + DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6) + DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6) + DEFINE_OPIVV(vmax, VMAX_FUNCT6) + DEFINE_OPIVX(vmax, VMAX_FUNCT6) + DEFINE_OPIVV(vand, VAND_FUNCT6) + DEFINE_OPIVX(vand, VAND_FUNCT6) + DEFINE_OPIVI(vand, VAND_FUNCT6) + DEFINE_OPIVV(vor, VOR_FUNCT6) + DEFINE_OPIVX(vor, VOR_FUNCT6) + DEFINE_OPIVI(vor, VOR_FUNCT6) + DEFINE_OPIVV(vxor, VXOR_FUNCT6) + DEFINE_OPIVX(vxor, VXOR_FUNCT6) + DEFINE_OPIVI(vxor, VXOR_FUNCT6) + DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6) + DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6) + DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6) + + DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6) + DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6) + DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6) + DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6) + + DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6) + DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6) + DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6) + + DEFINE_OPIVV(vmsne, VMSNE_FUNCT6) + DEFINE_OPIVX(vmsne, VMSNE_FUNCT6) + DEFINE_OPIVI(vmsne, VMSNE_FUNCT6) + + DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6) + DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6) + + DEFINE_OPIVV(vmslt, VMSLT_FUNCT6) + DEFINE_OPIVX(vmslt, VMSLT_FUNCT6) + + DEFINE_OPIVV(vmsle, VMSLE_FUNCT6) + DEFINE_OPIVX(vmsle, VMSLE_FUNCT6) + DEFINE_OPIVI(vmsle, VMSLE_FUNCT6) + + DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6) + DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6) + DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6) + + DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6) + DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6) + + DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6) + DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6) + + DEFINE_OPIVV(vsrl, VSRL_FUNCT6) + DEFINE_OPIVX(vsrl, VSRL_FUNCT6) + 
DEFINE_OPIVI(vsrl, VSRL_FUNCT6) + + DEFINE_OPIVV(vsra, VSRA_FUNCT6) + DEFINE_OPIVX(vsra, VSRA_FUNCT6) + DEFINE_OPIVI(vsra, VSRA_FUNCT6) + + DEFINE_OPIVV(vsll, VSLL_FUNCT6) + DEFINE_OPIVX(vsll, VSLL_FUNCT6) + DEFINE_OPIVI(vsll, VSLL_FUNCT6) + + DEFINE_OPIVV(vsmul, VSMUL_FUNCT6) + DEFINE_OPIVX(vsmul, VSMUL_FUNCT6) + + DEFINE_OPFVV(vfadd, VFADD_FUNCT6) + DEFINE_OPFVF(vfadd, VFADD_FUNCT6) + DEFINE_OPFVV(vfsub, VFSUB_FUNCT6) + DEFINE_OPFVF(vfsub, VFSUB_FUNCT6) + DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6) + DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6) + DEFINE_OPFVV(vfmul, VFMUL_FUNCT6) + DEFINE_OPFVF(vfmul, VFMUL_FUNCT6) + + // Vector Widening Floating-Point Add/Subtract Instructions + DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6) + DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6) + DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6) + DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6) + DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6) + DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6) + DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6) + DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6) + + // Vector Widening Floating-Point Reduction Instructions + DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6) + DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6) + + // Vector Widening Floating-Point Multiply + DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6) + DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6) + + DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6) + DEFINE_OPFVV(vmfne, VMFNE_FUNCT6) + DEFINE_OPFVV(vmflt, VMFLT_FUNCT6) + DEFINE_OPFVV(vmfle, VMFLE_FUNCT6) + DEFINE_OPFVV(vfmax, VMFMAX_FUNCT6) + DEFINE_OPFVV(vfmin, VMFMIN_FUNCT6) + DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6) + + DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6) + DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6) + DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6) + DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6) + DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6) + DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6) + + // Vector Single-Width Floating-Point Fused Multiply-Add Instructions + DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6) + DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6) + DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6) + DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6) + DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6) + DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6) + DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6) + DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6) + DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6) + DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6) + DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6) + DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6) + DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6) + DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6) + DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6) + DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6) + + // Vector Widening Floating-Point Fused Multiply-Add Instructions + DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6) + DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6) + DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6) + DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6) + DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6) + DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6) + DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6) + DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6) + + // Vector Narrowing Fixed-Point Clip Instructions + DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6) + DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6) + DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6) + DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6) + DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6) + DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6) + + // Vector Integer Extension + DEFINE_OPMVV_VIE(vzext_vf8) + DEFINE_OPMVV_VIE(vsext_vf8) + DEFINE_OPMVV_VIE(vzext_vf4) + DEFINE_OPMVV_VIE(vsext_vf4) + DEFINE_OPMVV_VIE(vzext_vf2) + DEFINE_OPMVV_VIE(vsext_vf2) + +#undef DEFINE_OPIVI +#undef DEFINE_OPIVV +#undef DEFINE_OPIVX +#undef DEFINE_OPMVV +#undef 
DEFINE_OPMVX +#undef DEFINE_OPFVV +#undef DEFINE_OPFWV +#undef DEFINE_OPFVF +#undef DEFINE_OPFWF +#undef DEFINE_OPFVV_FMA +#undef DEFINE_OPFVF_FMA +#undef DEFINE_OPMVV_VIE +#undef DEFINE_OPFRED + +#define DEFINE_VFUNARY(name, funct6, vs1) \ + void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) { \ + GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \ + } + + DEFINE_VFUNARY(vfcvt_xu_f_v, VFUNARY0_FUNCT6, VFCVT_XU_F_V) + DEFINE_VFUNARY(vfcvt_x_f_v, VFUNARY0_FUNCT6, VFCVT_X_F_V) + DEFINE_VFUNARY(vfcvt_f_x_v, VFUNARY0_FUNCT6, VFCVT_F_X_V) + DEFINE_VFUNARY(vfcvt_f_xu_v, VFUNARY0_FUNCT6, VFCVT_F_XU_V) + DEFINE_VFUNARY(vfwcvt_xu_f_v, VFUNARY0_FUNCT6, VFWCVT_XU_F_V) + DEFINE_VFUNARY(vfwcvt_x_f_v, VFUNARY0_FUNCT6, VFWCVT_X_F_V) + DEFINE_VFUNARY(vfwcvt_f_x_v, VFUNARY0_FUNCT6, VFWCVT_F_X_V) + DEFINE_VFUNARY(vfwcvt_f_xu_v, VFUNARY0_FUNCT6, VFWCVT_F_XU_V) + DEFINE_VFUNARY(vfwcvt_f_f_v, VFUNARY0_FUNCT6, VFWCVT_F_F_V) + + DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W) + DEFINE_VFUNARY(vfncvt_x_f_w, VFUNARY0_FUNCT6, VFNCVT_X_F_W) + DEFINE_VFUNARY(vfncvt_xu_f_w, VFUNARY0_FUNCT6, VFNCVT_XU_F_W) + + DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V) + DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V) + DEFINE_VFUNARY(vfrsqrt7_v, VFUNARY1_FUNCT6, VFRSQRT7_V) + DEFINE_VFUNARY(vfrec7_v, VFUNARY1_FUNCT6, VFREC7_V) +#undef DEFINE_VFUNARY + + void vnot_vv(VRegister dst, VRegister src, MaskType mask = NoMask) { + vxor_vi(dst, src, -1, mask); + } + + void vneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) { + vrsub_vx(dst, src, zero_reg, mask); + } + + void vfneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) { + vfsngjn_vv(dst, src, src, mask); + } + void vfabs_vv(VRegister dst, VRegister src, MaskType mask = NoMask) { + vfsngjx_vv(dst, src, src, mask); + } + void vfirst_m(Register rd, VRegister vs2, MaskType mask = NoMask); + + void vcpop_m(Register rd, VRegister vs2, MaskType mask = NoMask); + + // Privileged + void uret(); + void sret(); + void mret(); + void wfi(); + void sfence_vma(Register rs1, Register rs2); + + // Assembler Pseudo Instructions (Tables 25.2, 25.3, RISC-V Unprivileged ISA) + void nop(); + void RV_li(Register rd, int64_t imm); + // Returns the number of instructions required to load the immediate + static int li_estimate(int64_t imm, bool is_get_temp_reg = false); + // Loads an immediate, always using 8 instructions, regardless of the value, + // so that it can be modified later. 
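+ // Illustrative note (an assumption about the shape, not a spec of the exact + // sequence emitted): any 64-bit value fits a fixed 8-instruction pattern, e.g. + // lui/addiw to form a sign-extended upper part followed by alternating + // slli/ori steps that shift in the remaining bits, so every slot is always + // present and can be patched in place later.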
+ void li_constant(Register rd, int64_t imm); + void li_ptr(Register rd, int64_t imm); + + void mv(Register rd, Register rs) { addi(rd, rs, 0); } + void not_(Register rd, Register rs) { xori(rd, rs, -1); } + void neg(Register rd, Register rs) { sub(rd, zero_reg, rs); } + void negw(Register rd, Register rs) { subw(rd, zero_reg, rs); } + void sext_w(Register rd, Register rs) { addiw(rd, rs, 0); } + void seqz(Register rd, Register rs) { sltiu(rd, rs, 1); } + void snez(Register rd, Register rs) { sltu(rd, zero_reg, rs); } + void sltz(Register rd, Register rs) { slt(rd, rs, zero_reg); } + void sgtz(Register rd, Register rs) { slt(rd, zero_reg, rs); } + + void fmv_s(FPURegister rd, FPURegister rs) { fsgnj_s(rd, rs, rs); } + void fabs_s(FPURegister rd, FPURegister rs) { fsgnjx_s(rd, rs, rs); } + void fneg_s(FPURegister rd, FPURegister rs) { fsgnjn_s(rd, rs, rs); } + void fmv_d(FPURegister rd, FPURegister rs) { fsgnj_d(rd, rs, rs); } + void fabs_d(FPURegister rd, FPURegister rs) { fsgnjx_d(rd, rs, rs); } + void fneg_d(FPURegister rd, FPURegister rs) { fsgnjn_d(rd, rs, rs); } + + void beqz(Register rs, int16_t imm13) { beq(rs, zero_reg, imm13); } + inline void beqz(Register rs1, Label* L) { beqz(rs1, branch_offset(L)); } + void bnez(Register rs, int16_t imm13) { bne(rs, zero_reg, imm13); } + inline void bnez(Register rs1, Label* L) { bnez(rs1, branch_offset(L)); } + void blez(Register rs, int16_t imm13) { bge(zero_reg, rs, imm13); } + inline void blez(Register rs1, Label* L) { blez(rs1, branch_offset(L)); } + void bgez(Register rs, int16_t imm13) { bge(rs, zero_reg, imm13); } + inline void bgez(Register rs1, Label* L) { bgez(rs1, branch_offset(L)); } + void bltz(Register rs, int16_t imm13) { blt(rs, zero_reg, imm13); } + inline void bltz(Register rs1, Label* L) { bltz(rs1, branch_offset(L)); } + void bgtz(Register rs, int16_t imm13) { blt(zero_reg, rs, imm13); } + + inline void bgtz(Register rs1, Label* L) { bgtz(rs1, branch_offset(L)); } + void bgt(Register rs1, Register rs2, int16_t imm13) { blt(rs2, rs1, imm13); } + inline void bgt(Register rs1, Register rs2, Label* L) { + bgt(rs1, rs2, branch_offset(L)); + } + void ble(Register rs1, Register rs2, int16_t imm13) { bge(rs2, rs1, imm13); } + inline void ble(Register rs1, Register rs2, Label* L) { + ble(rs1, rs2, branch_offset(L)); + } + void bgtu(Register rs1, Register rs2, int16_t imm13) { + bltu(rs2, rs1, imm13); + } + inline void bgtu(Register rs1, Register rs2, Label* L) { + bgtu(rs1, rs2, branch_offset(L)); + } + void bleu(Register rs1, Register rs2, int16_t imm13) { + bgeu(rs2, rs1, imm13); + } + inline void bleu(Register rs1, Register rs2, Label* L) { + bleu(rs1, rs2, branch_offset(L)); + } + + void j(int32_t imm21) { jal(zero_reg, imm21); } + inline void j(Label* L) { j(jump_offset(L)); } + inline void b(Label* L) { j(L); } + void jal(int32_t imm21) { jal(ra, imm21); } + inline void jal(Label* L) { jal(jump_offset(L)); } + void jr(Register rs) { jalr(zero_reg, rs, 0); } + void jr(Register rs, int32_t imm12) { jalr(zero_reg, rs, imm12); } + void jalr(Register rs, int32_t imm12) { jalr(ra, rs, imm12); } + void jalr(Register rs) { jalr(ra, rs, 0); } + void ret() { jalr(zero_reg, ra, 0); } + void call(int32_t offset) { + auipc(ra, (offset >> 12) + ((offset & 0x800) >> 11)); + jalr(ra, ra, offset << 20 >> 20); + } + + // Read instructions-retired counter + void rdinstret(Register rd) { csrrs(rd, csr_instret, zero_reg); } + void rdinstreth(Register rd) { csrrs(rd, csr_instreth, zero_reg); } + void rdcycle(Register rd) { csrrs(rd, csr_cycle, 
zero_reg); } + void rdcycleh(Register rd) { csrrs(rd, csr_cycleh, zero_reg); } + void rdtime(Register rd) { csrrs(rd, csr_time, zero_reg); } + void rdtimeh(Register rd) { csrrs(rd, csr_timeh, zero_reg); } + + void csrr(Register rd, ControlStatusReg csr) { csrrs(rd, csr, zero_reg); } + void csrw(ControlStatusReg csr, Register rs) { csrrw(zero_reg, csr, rs); } + void csrs(ControlStatusReg csr, Register rs) { csrrs(zero_reg, csr, rs); } + void csrc(ControlStatusReg csr, Register rs) { csrrc(zero_reg, csr, rs); } + + void csrwi(ControlStatusReg csr, uint8_t imm) { csrrwi(zero_reg, csr, imm); } + void csrsi(ControlStatusReg csr, uint8_t imm) { csrrsi(zero_reg, csr, imm); } + void csrci(ControlStatusReg csr, uint8_t imm) { csrrci(zero_reg, csr, imm); } + + void frcsr(Register rd) { csrrs(rd, csr_fcsr, zero_reg); } + void fscsr(Register rd, Register rs) { csrrw(rd, csr_fcsr, rs); } + void fscsr(Register rs) { csrrw(zero_reg, csr_fcsr, rs); } + + void frrm(Register rd) { csrrs(rd, csr_frm, zero_reg); } + void fsrm(Register rd, Register rs) { csrrw(rd, csr_frm, rs); } + void fsrm(Register rs) { csrrw(zero_reg, csr_frm, rs); } + + void frflags(Register rd) { csrrs(rd, csr_fflags, zero_reg); } + void fsflags(Register rd, Register rs) { csrrw(rd, csr_fflags, rs); } + void fsflags(Register rs) { csrrw(zero_reg, csr_fflags, rs); } + + // Other pseudo instructions that are not part of RISCV pseudo assemly + void nor(Register rd, Register rs, Register rt) { + or_(rd, rs, rt); + not_(rd, rd); + } + + void sync() { fence(0b1111, 0b1111); } + void break_(uint32_t code, bool break_as_stop = false); + void stop(uint32_t code = kMaxStopCode); + + // Check the code size generated from label to here. + int SizeOfCodeGeneratedSince(Label* label) { + return pc_offset() - label->pos(); + } + + // Check the number of instructions generated from label to here. + int InstructionsGeneratedSince(Label* label) { + return SizeOfCodeGeneratedSince(label) / kInstrSize; + } + + using BlockConstPoolScope = ConstantPool::BlockScope; + // Class for scoping postponing the trampoline pool generation. + class BlockTrampolinePoolScope { + public: + explicit BlockTrampolinePoolScope(Assembler* assem, int margin = 0) + : assem_(assem) { + assem_->StartBlockTrampolinePool(); + } + + explicit BlockTrampolinePoolScope(Assembler* assem, PoolEmissionCheck check) + : assem_(assem) { + assem_->StartBlockTrampolinePool(); + } + ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); } + + private: + Assembler* assem_; + DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); + }; + + // Class for postponing the assembly buffer growth. Typically used for + // sequences of instructions that must be emitted as a unit, before + // buffer growth (and relocation) can occur. + // This blocking scope is not nestable. + class BlockGrowBufferScope { + public: + explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) { + assem_->StartBlockGrowBuffer(); + } + ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); } + + private: + Assembler* assem_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope); + }; + + // Record a deoptimization reason that can be used by a log or cpu profiler. + // Use --trace-deopt to enable. 
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id, + SourcePosition position, int id); + + static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc, + intptr_t pc_delta); + static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc, + intptr_t pc_delta); + + // Writes a single byte or word of data in the code stream. Used for + // inline tables, e.g., jump-tables. + void db(uint8_t data); + void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO); + void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO); + void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) { + dq(data, rmode); + } + void dd(Label* label); + + Instruction* pc() const { return reinterpret_cast<Instruction*>(pc_); } + + // Postpone the generation of the trampoline pool for the specified number of + // instructions. + void BlockTrampolinePoolFor(int instructions); + + // Check if there is less than kGap bytes available in the buffer. + // If this is the case, we need to grow the buffer before emitting + // an instruction or relocation information. + inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; } + + // Get the number of bytes available in the buffer. + inline intptr_t available_space() const { + return reloc_info_writer.pos() - pc_; + } + + // Read/patch instructions. + static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); } + static void instr_at_put(Address pc, Instr instr) { + *reinterpret_cast<Instr*>(pc) = instr; + } + Instr instr_at(int pos) { + return *reinterpret_cast<Instr*>(buffer_start_ + pos); + } + void instr_at_put(int pos, Instr instr) { + *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr; + } + + void instr_at_put(int pos, ShortInstr instr) { + *reinterpret_cast<ShortInstr*>(buffer_start_ + pos) = instr; + } + + Address toAddress(int pos) { + return reinterpret_cast<Address>
(buffer_start_ + pos); + } + + // Check if an instruction is a branch of some kind. + static bool IsBranch(Instr instr); + static bool IsCBranch(Instr instr); + static bool IsNop(Instr instr); + static bool IsJump(Instr instr); + static bool IsJal(Instr instr); + static bool IsCJal(Instr instr); + static bool IsJalr(Instr instr); + static bool IsLui(Instr instr); + static bool IsAuipc(Instr instr); + static bool IsAddiw(Instr instr); + static bool IsAddi(Instr instr); + static bool IsOri(Instr instr); + static bool IsSlli(Instr instr); + static bool IsLd(Instr instr); + void CheckTrampolinePool(); + + // Get the code target object for a pc-relative call or jump. + V8_INLINE Handle relative_code_target_object_handle_at( + Address pc_) const; + + inline int UnboundLabelsCount() { return unbound_labels_count_; } + + using BlockPoolsScope = BlockTrampolinePoolScope; + + void RecordConstPool(int size); + + void ForceConstantPoolEmissionWithoutJump() { + constpool_.Check(Emission::kForced, Jump::kOmitted); + } + void ForceConstantPoolEmissionWithJump() { + constpool_.Check(Emission::kForced, Jump::kRequired); + } + // Check if the const pool needs to be emitted while pretending that {margin} + // more bytes of instructions have already been emitted. + void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) { + constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin); + } + + void EmitConstPoolWithoutJumpIfNeeded(size_t margin = 0) { + constpool_.Check(Emission::kIfNeeded, Jump::kOmitted, margin); + } + + void RecordEntry(uint32_t data, RelocInfo::Mode rmode) { + constpool_.RecordEntry(data, rmode); + } + + void RecordEntry(uint64_t data, RelocInfo::Mode rmode) { + constpool_.RecordEntry(data, rmode); + } + + friend class VectorUnit; + class VectorUnit { + public: + inline int32_t sew() const { return 2 ^ (sew_ + 3); } + + inline int32_t vlmax() const { + if ((lmul_ & 0b100) != 0) { + return (kRvvVLEN / sew()) >> (lmul_ & 0b11); + } else { + return ((kRvvVLEN << lmul_) / sew()); + } + } + + explicit VectorUnit(Assembler* assm) : assm_(assm) {} + + void set(Register rd, VSew sew, Vlmul lmul) { + if (sew != sew_ || lmul != lmul_ || vl != vlmax()) { + sew_ = sew; + lmul_ = lmul; + vl = vlmax(); + assm_->vsetvlmax(rd, sew_, lmul_); + } + } + + void set(Register rd, int8_t sew, int8_t lmul) { + DCHECK_GE(sew, E8); + DCHECK_LE(sew, E64); + DCHECK_GE(lmul, m1); + DCHECK_LE(lmul, mf2); + set(rd, VSew(sew), Vlmul(lmul)); + } + + void set(RoundingMode mode) { + if (mode_ != mode) { + assm_->addi(kScratchReg, zero_reg, mode << kFcsrFrmShift); + assm_->fscsr(kScratchReg); + mode_ = mode; + } + } + void set(Register rd, Register rs1, VSew sew, Vlmul lmul) { + if (sew != sew_ || lmul != lmul_) { + sew_ = sew; + lmul_ = lmul; + vl = 0; + assm_->vsetvli(rd, rs1, sew_, lmul_); + } + } + + void set(VSew sew, Vlmul lmul) { + if (sew != sew_ || lmul != lmul_) { + sew_ = sew; + lmul_ = lmul; + assm_->vsetvl(sew_, lmul_); + } + } + + private: + VSew sew_ = E8; + Vlmul lmul_ = m1; + int32_t vl = 0; + Assembler* assm_; + RoundingMode mode_ = RNE; + }; + + VectorUnit VU; + + void CheckTrampolinePoolQuick(int extra_instructions = 0) { + DEBUG_PRINTF("\tpc_offset:%d %d\n", pc_offset(), + next_buffer_check_ - extra_instructions * kInstrSize); + if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) { + CheckTrampolinePool(); + } + } + + protected: + // Readable constants for base and offset adjustment helper, these indicate if + // aside from offset, another value like offset + 4 should fit into int16. 
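+ // Note: RISC-V load/store instructions take a signed 12-bit immediate, so + // when an offset (or offset + 4 for the second half of a paired access) does + // not fit, the helpers below fold the excess into the base register via a + // scratch register.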
+ enum class OffsetAccessType : bool { + SINGLE_ACCESS = false, + TWO_ACCESSES = true + }; + + // Determine whether need to adjust base and offset of memroy load/store + bool NeedAdjustBaseAndOffset( + const MemOperand& src, OffsetAccessType = OffsetAccessType::SINGLE_ACCESS, + int second_Access_add_to_offset = 4); + + // Helper function for memory load/store using base register and offset. + void AdjustBaseAndOffset( + MemOperand* src, Register scratch, + OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS, + int second_access_add_to_offset = 4); + + inline static void set_target_internal_reference_encoded_at(Address pc, + Address target); + + int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; } + + // Decode branch instruction at pos and return branch target pos. + int target_at(int pos, bool is_internal); + + // Patch branch instruction at pos to branch to given branch target pos. + void target_at_put(int pos, int target_pos, bool is_internal, + bool trampoline = false); + + // Say if we need to relocate with this mode. + bool MustUseReg(RelocInfo::Mode rmode); + + // Record reloc info for current pc_. + void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); + + // Block the emission of the trampoline pool before pc_offset. + void BlockTrampolinePoolBefore(int pc_offset) { + if (no_trampoline_pool_before_ < pc_offset) + no_trampoline_pool_before_ = pc_offset; + } + + void StartBlockTrampolinePool() { + DEBUG_PRINTF("\tStartBlockTrampolinePool\n"); + trampoline_pool_blocked_nesting_++; + } + + void EndBlockTrampolinePool() { + trampoline_pool_blocked_nesting_--; + DEBUG_PRINTF("\ttrampoline_pool_blocked_nesting:%d\n", + trampoline_pool_blocked_nesting_); + if (trampoline_pool_blocked_nesting_ == 0) { + CheckTrampolinePoolQuick(1); + } + } + + bool is_trampoline_pool_blocked() const { + return trampoline_pool_blocked_nesting_ > 0; + } + + bool has_exception() const { return internal_trampoline_exception_; } + + bool is_trampoline_emitted() const { return trampoline_emitted_; } + + // Temporarily block automatic assembly buffer growth. + void StartBlockGrowBuffer() { + DCHECK(!block_buffer_growth_); + block_buffer_growth_ = true; + } + + void EndBlockGrowBuffer() { + DCHECK(block_buffer_growth_); + block_buffer_growth_ = false; + } + + bool is_buffer_growth_blocked() const { return block_buffer_growth_; } + + private: + void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul, + TailAgnosticType tail = tu, MaskAgnosticType mask = mu); + + void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul, + TailAgnosticType tail = tu, MaskAgnosticType mask = mu); + + inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul, + TailAgnosticType tail = tu, + MaskAgnosticType mask = mu) { + vsetvli(rd, zero_reg, vsew, vlmul, tu, mu); + } + + inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu, + MaskAgnosticType mask = mu) { + vsetvli(zero_reg, zero_reg, vsew, vlmul, tu, mu); + } + + void vsetvl(Register rd, Register rs1, Register rs2); + + // Avoid overflows for displacements etc. + static const int kMaximalBufferSize = 512 * MB; + + // Buffer size and constant pool distance are checked together at regular + // intervals of kBufferCheckInterval emitted bytes. + static constexpr int kBufferCheckInterval = 1 * KB / 2; + + // Code generation. + // The relocation writer's position is at least kGap bytes below the end of + // the generated instructions. This is so that multi-instruction sequences do + // not have to check for overflow. 
The same is true for writes of large + relocation info entries. + static constexpr int kGap = 64; + STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap); + + // Repeated checking whether the trampoline pool should be emitted is rather + // expensive. By default we only check again once a number of instructions + // has been generated. + static constexpr int kCheckConstIntervalInst = 32; + static constexpr int kCheckConstInterval = + kCheckConstIntervalInst * kInstrSize; + + int next_buffer_check_; // pc offset of next buffer check. + + // Emission of the trampoline pool may be blocked in some code sequences. + int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. + int no_trampoline_pool_before_; // Block emission before this pc offset. + + // Keep track of the last emitted pool to guarantee a maximal distance. + int last_trampoline_pool_end_; // pc offset of the end of the last pool. + + // Automatic growth of the assembly buffer may be blocked for some sequences. + bool block_buffer_growth_; // Block growth when true. + + // Relocation information generation. + // Each relocation is encoded as a variable size value. + static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize; + RelocInfoWriter reloc_info_writer; + + // The bound position, before this we cannot do instruction elimination. + int last_bound_pos_; + + // Code emission. + inline void CheckBuffer(); + void GrowBuffer(); + inline void emit(Instr x); + inline void emit(ShortInstr x); + inline void emit(uint64_t x); + template <typename T> + inline void EmitHelper(T x); + + static void disassembleInstr(Instr instr); + + // Instruction generation. + + // ----- Top-level instruction formats match those in the ISA manual + // (R, I, S, B, U, J). These match the formats defined in LLVM's + // RISCVInstrFormats.td.
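+ // For reference, the field layouts (msb..lsb) are: + //   R: funct7 | rs2 | rs1 | funct3 | rd | opcode + //   I: imm[11:0] | rs1 | funct3 | rd | opcode + //   S: imm[11:5] | rs2 | rs1 | funct3 | imm[4:0] | opcode + //   B: imm[12|10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode + //   U: imm[31:12] | rd | opcode + //   J: imm[20|10:1|11|19:12] | rd | opcode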
+ void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, Register rd, + Register rs1, Register rs2); + void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, FPURegister rd, + FPURegister rs1, FPURegister rs2); + void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, Register rd, + FPURegister rs1, Register rs2); + void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, FPURegister rd, + Register rs1, Register rs2); + void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, FPURegister rd, + FPURegister rs1, Register rs2); + void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, Register rd, + FPURegister rs1, FPURegister rs2); + void GenInstrR4(uint8_t funct2, Opcode opcode, Register rd, Register rs1, + Register rs2, Register rs3, RoundingMode frm); + void GenInstrR4(uint8_t funct2, Opcode opcode, FPURegister rd, + FPURegister rs1, FPURegister rs2, FPURegister rs3, + RoundingMode frm); + void GenInstrRAtomic(uint8_t funct5, bool aq, bool rl, uint8_t funct3, + Register rd, Register rs1, Register rs2); + void GenInstrRFrm(uint8_t funct7, Opcode opcode, Register rd, Register rs1, + Register rs2, RoundingMode frm); + void GenInstrI(uint8_t funct3, Opcode opcode, Register rd, Register rs1, + int16_t imm12); + void GenInstrI(uint8_t funct3, Opcode opcode, FPURegister rd, Register rs1, + int16_t imm12); + void GenInstrIShift(bool arithshift, uint8_t funct3, Opcode opcode, + Register rd, Register rs1, uint8_t shamt); + void GenInstrIShiftW(bool arithshift, uint8_t funct3, Opcode opcode, + Register rd, Register rs1, uint8_t shamt); + void GenInstrS(uint8_t funct3, Opcode opcode, Register rs1, Register rs2, + int16_t imm12); + void GenInstrS(uint8_t funct3, Opcode opcode, Register rs1, FPURegister rs2, + int16_t imm12); + void GenInstrB(uint8_t funct3, Opcode opcode, Register rs1, Register rs2, + int16_t imm12); + void GenInstrU(Opcode opcode, Register rd, int32_t imm20); + void GenInstrJ(Opcode opcode, Register rd, int32_t imm20); + void GenInstrCR(uint8_t funct4, Opcode opcode, Register rd, Register rs2); + void GenInstrCA(uint8_t funct6, Opcode opcode, Register rd, uint8_t funct, + Register rs2); + void GenInstrCI(uint8_t funct3, Opcode opcode, Register rd, int8_t imm6); + void GenInstrCIU(uint8_t funct3, Opcode opcode, Register rd, uint8_t uimm6); + void GenInstrCIU(uint8_t funct3, Opcode opcode, FPURegister rd, + uint8_t uimm6); + void GenInstrCIW(uint8_t funct3, Opcode opcode, Register rd, uint8_t uimm8); + void GenInstrCSS(uint8_t funct3, Opcode opcode, FPURegister rs2, + uint8_t uimm6); + void GenInstrCSS(uint8_t funct3, Opcode opcode, Register rs2, uint8_t uimm6); + void GenInstrCL(uint8_t funct3, Opcode opcode, Register rd, Register rs1, + uint8_t uimm5); + void GenInstrCL(uint8_t funct3, Opcode opcode, FPURegister rd, Register rs1, + uint8_t uimm5); + void GenInstrCS(uint8_t funct3, Opcode opcode, Register rs2, Register rs1, + uint8_t uimm5); + void GenInstrCS(uint8_t funct3, Opcode opcode, FPURegister rs2, Register rs1, + uint8_t uimm5); + void GenInstrCJ(uint8_t funct3, Opcode opcode, uint16_t uint11); + void GenInstrCB(uint8_t funct3, Opcode opcode, Register rs1, uint8_t uimm8); + void GenInstrCBA(uint8_t funct3, uint8_t funct2, Opcode opcode, Register rs1, + int8_t imm6); + + // ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td + void GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2, + int16_t imm12); + void GenInstrLoad_ri(uint8_t funct3, Register rd, Register rs1, + int16_t imm12); + void 
GenInstrStore_rri(uint8_t funct3, Register rs1, Register rs2, + int16_t imm12); + void GenInstrALU_ri(uint8_t funct3, Register rd, Register rs1, int16_t imm12); + void GenInstrShift_ri(bool arithshift, uint8_t funct3, Register rd, + Register rs1, uint8_t shamt); + void GenInstrALU_rr(uint8_t funct7, uint8_t funct3, Register rd, Register rs1, + Register rs2); + void GenInstrCSR_ir(uint8_t funct3, Register rd, ControlStatusReg csr, + Register rs1); + void GenInstrCSR_ii(uint8_t funct3, Register rd, ControlStatusReg csr, + uint8_t rs1); + void GenInstrShiftW_ri(bool arithshift, uint8_t funct3, Register rd, + Register rs1, uint8_t shamt); + void GenInstrALUW_rr(uint8_t funct7, uint8_t funct3, Register rd, + Register rs1, Register rs2); + void GenInstrPriv(uint8_t funct7, Register rs1, Register rs2); + void GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd, Register rs1, + int16_t imm12); + void GenInstrStoreFP_rri(uint8_t funct3, Register rs1, FPURegister rs2, + int16_t imm12); + void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd, + FPURegister rs1, FPURegister rs2); + void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd, + Register rs1, Register rs2); + void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd, + FPURegister rs1, Register rs2); + void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd, + FPURegister rs1, Register rs2); + void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd, + FPURegister rs1, FPURegister rs2); + + // ----------------------------RVV------------------------------------------ + // vsetvl + void GenInstrV(Register rd, Register rs1, Register rs2); + // vsetvli + void GenInstrV(Register rd, Register rs1, uint32_t zimm); + // OPIVV OPFVV OPMVV + void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs1, + VRegister vs2, MaskType mask = NoMask); + void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, int8_t vs1, + VRegister vs2, MaskType mask = NoMask); + void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs2, + MaskType mask = NoMask); + // OPMVV OPFVV + void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, VRegister vs1, + VRegister vs2, MaskType mask = NoMask); + // OPFVV + void GenInstrV(uint8_t funct6, Opcode opcode, FPURegister fd, VRegister vs1, + VRegister vs2, MaskType mask = NoMask); + + // OPIVX OPMVX + void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, Register rs1, + VRegister vs2, MaskType mask = NoMask); + // OPFVF + void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, FPURegister fs1, + VRegister vs2, MaskType mask = NoMask); + // OPMVX + void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2, + MaskType mask = NoMask); + // OPIVI + void GenInstrV(uint8_t funct6, VRegister vd, int8_t simm5, VRegister vs2, + MaskType mask = NoMask); + + // VL VS + void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1, + uint8_t umop, MaskType mask, uint8_t IsMop, bool IsMew, + uint8_t Nf); + + void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1, + Register rs2, MaskType mask, uint8_t IsMop, bool IsMew, + uint8_t Nf); + // VL VS AMO + void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1, + VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew, + uint8_t Nf); + // vmv_xs vcpop_m vfirst_m + void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, uint8_t vs1, + VRegister vs2, MaskType mask); + // Labels. 
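+ // (Roughly: an unbound label is threaded as a chain through the offset + // fields of the instructions that reference it; bind_to() walks that chain + // with target_at()/target_at_put() and patches in the final offsets.)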
+ void print(const Label* L); + void bind_to(Label* L, int pos); + void next(Label* L, bool is_internal); + + // One trampoline consists of: + // - space for trampoline slots, + // - space for labels. + // + // Space for trampoline slots is equal to slot_count * 2 * kInstrSize. + // Space for trampoline slots precedes space for labels. Each label is of one + // instruction size, so total amount for labels is equal to + // label_count * kInstrSize. + class Trampoline { + public: + Trampoline() { + start_ = 0; + next_slot_ = 0; + free_slot_count_ = 0; + end_ = 0; + } + Trampoline(int start, int slot_count) { + start_ = start; + next_slot_ = start; + free_slot_count_ = slot_count; + end_ = start + slot_count * kTrampolineSlotsSize; + } + int start() { return start_; } + int end() { return end_; } + int take_slot() { + int trampoline_slot = kInvalidSlotPos; + if (free_slot_count_ <= 0) { + // We have run out of space on trampolines. + // Make sure we fail in debug mode, so we become aware of each case + // when this happens. + DCHECK(0); + // Internal exception will be caught. + } else { + trampoline_slot = next_slot_; + free_slot_count_--; + next_slot_ += kTrampolineSlotsSize; + } + return trampoline_slot; + } + + private: + int start_; + int end_; + int next_slot_; + int free_slot_count_; + }; + + int32_t get_trampoline_entry(int32_t pos); + int unbound_labels_count_; + // After trampoline is emitted, long branches are used in generated code for + // the forward branches whose target offsets could be beyond reach of branch + // instruction. We use this information to trigger different mode of + // branch instruction generation, where we use jump instructions rather + // than regular branch instructions. + bool trampoline_emitted_ = false; + static constexpr int kInvalidSlotPos = -1; + + // Internal reference positions, required for unbounded internal reference + // labels. 
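+ // (Assumed meaning: the pc offsets at which absolute internal references are + // embedded, so the patching code above can tell them apart from ordinary + // branch links.)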
+ std::set internal_reference_positions_; + bool is_internal_reference(Label* L) { + return internal_reference_positions_.find(L->pos()) != + internal_reference_positions_.end(); + } + + Trampoline trampoline_; + bool internal_trampoline_exception_; + + RegList scratch_register_list_; + + private: + ConstantPool constpool_; + + void AllocateAndInstallRequestedHeapObjects(Isolate* isolate); + + int WriteCodeComments(); + + friend class RegExpMacroAssemblerRISCV; + friend class RelocInfo; + friend class BlockTrampolinePoolScope; + friend class EnsureSpace; + friend class ConstantPool; +}; + +class EnsureSpace { + public: + explicit inline EnsureSpace(Assembler* assembler); +}; + +class V8_EXPORT_PRIVATE UseScratchRegisterScope { + public: + explicit UseScratchRegisterScope(Assembler* assembler); + ~UseScratchRegisterScope(); + + Register Acquire(); + bool hasAvailable() const; + void Include(const RegList& list) { *available_ |= list; } + void Exclude(const RegList& list) { + *available_ &= RegList::FromBits(~list.bits()); + } + void Include(const Register& reg1, const Register& reg2 = no_reg) { + RegList list({reg1, reg2}); + Include(list); + } + void Exclude(const Register& reg1, const Register& reg2 = no_reg) { + RegList list({reg1, reg2}); + Exclude(list); + } + + private: + RegList* available_; + RegList old_available_; +}; + +class LoadStoreLaneParams { + public: + int sz; + uint8_t laneidx; + + LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx); + + private: + LoadStoreLaneParams(uint8_t laneidx, int sz, int lanes) + : sz(sz), laneidx(laneidx % lanes) {} +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_H_ diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.cc b/deps/v8/src/codegen/riscv64/constants-riscv64.cc new file mode 100644 index 00000000000000..655a97c12f58eb --- /dev/null +++ b/deps/v8/src/codegen/riscv64/constants-riscv64.cc @@ -0,0 +1,245 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#if V8_TARGET_ARCH_RISCV64 + +#include "src/codegen/riscv64/constants-riscv64.h" + +namespace v8 { +namespace internal { + +// ----------------------------------------------------------------------------- +// Registers. + +// These register names are defined in a way to match the native disassembler +// formatting. See for example the command "objdump -d ". +const char* Registers::names_[kNumSimuRegisters] = { + "zero_reg", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "fp", "s1", "a0", + "a1", "a2", "a3", "a4", "a5", "a6", "a7", "s2", "s3", "s4", "s5", + "s6", "s7", "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", "pc"}; + +// List of alias names which can be used when referring to RISC-V registers. +const Registers::RegisterAlias Registers::aliases_[] = { + {0, "zero"}, + {33, "pc"}, + {8, "s0"}, + {8, "s0_fp"}, + {kInvalidRegister, nullptr}}; + +const char* Registers::Name(int reg) { + const char* result; + if ((0 <= reg) && (reg < kNumSimuRegisters)) { + result = names_[reg]; + } else { + result = "noreg"; + } + return result; +} + +int Registers::Number(const char* name) { + // Look through the canonical names. + for (int i = 0; i < kNumSimuRegisters; i++) { + if (strcmp(names_[i], name) == 0) { + return i; + } + } + + // Look through the alias names. 
+ int i = 0; + while (aliases_[i].reg != kInvalidRegister) { + if (strcmp(aliases_[i].name, name) == 0) { + return aliases_[i].reg; + } + i++; + } + + // No register with the reguested name found. + return kInvalidRegister; +} + +/* +const char* FPURegisters::names_[kNumFPURegisters] = { + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", + "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", + "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"}; +*/ +const char* FPURegisters::names_[kNumFPURegisters] = { + "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", + "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", + "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", + "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11"}; + +// List of alias names which can be used when referring to RISC-V FP registers. +const FPURegisters::RegisterAlias FPURegisters::aliases_[] = { + {kInvalidRegister, nullptr}}; + +const char* FPURegisters::Name(int creg) { + const char* result; + if ((0 <= creg) && (creg < kNumFPURegisters)) { + result = names_[creg]; + } else { + result = "nocreg"; + } + return result; +} + +int FPURegisters::Number(const char* name) { + // Look through the canonical names. + for (int i = 0; i < kNumFPURegisters; i++) { + if (strcmp(names_[i], name) == 0) { + return i; + } + } + + // Look through the alias names. + int i = 0; + while (aliases_[i].creg != kInvalidRegister) { + if (strcmp(aliases_[i].name, name) == 0) { + return aliases_[i].creg; + } + i++; + } + + // No Cregister with the reguested name found. + return kInvalidFPURegister; +} + +const char* VRegisters::names_[kNumVRegisters] = { + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", + "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", + "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"}; + +const VRegisters::RegisterAlias VRegisters::aliases_[] = { + {kInvalidRegister, nullptr}}; + +const char* VRegisters::Name(int creg) { + const char* result; + if ((0 <= creg) && (creg < kNumVRegisters)) { + result = names_[creg]; + } else { + result = "nocreg"; + } + return result; +} + +int VRegisters::Number(const char* name) { + // Look through the canonical names. + for (int i = 0; i < kNumVRegisters; i++) { + if (strcmp(names_[i], name) == 0) { + return i; + } + } + + // Look through the alias names. + int i = 0; + while (aliases_[i].creg != kInvalidRegister) { + if (strcmp(aliases_[i].name, name) == 0) { + return aliases_[i].creg; + } + i++; + } + + // No Cregister with the reguested name found. 
+ return kInvalidVRegister; +} + +InstructionBase::Type InstructionBase::InstructionType() const { + if (IsIllegalInstruction()) { + return kUnsupported; + } + // RV64C Instruction + if (FLAG_riscv_c_extension && IsShortInstruction()) { + switch (InstructionBits() & kRvcOpcodeMask) { + case RO_C_ADDI4SPN: + return kCIWType; + case RO_C_FLD: + case RO_C_LW: + case RO_C_LD: + return kCLType; + case RO_C_FSD: + case RO_C_SW: + case RO_C_SD: + return kCSType; + case RO_C_NOP_ADDI: + case RO_C_ADDIW: + case RO_C_LI: + case RO_C_LUI_ADD: + return kCIType; + case RO_C_MISC_ALU: + if (Bits(11, 10) != 0b11) + return kCBType; + else + return kCAType; + case RO_C_J: + return kCJType; + case RO_C_BEQZ: + case RO_C_BNEZ: + return kCBType; + case RO_C_SLLI: + case RO_C_FLDSP: + case RO_C_LWSP: + case RO_C_LDSP: + return kCIType; + case RO_C_JR_MV_ADD: + return kCRType; + case RO_C_FSDSP: + case RO_C_SWSP: + case RO_C_SDSP: + return kCSSType; + default: + break; + } + } else { + // RISCV routine + switch (InstructionBits() & kBaseOpcodeMask) { + case LOAD: + return kIType; + case LOAD_FP: + return kIType; + case MISC_MEM: + return kIType; + case OP_IMM: + return kIType; + case AUIPC: + return kUType; + case OP_IMM_32: + return kIType; + case STORE: + return kSType; + case STORE_FP: + return kSType; + case AMO: + return kRType; + case OP: + return kRType; + case LUI: + return kUType; + case OP_32: + return kRType; + case MADD: + case MSUB: + case NMSUB: + case NMADD: + return kR4Type; + case OP_FP: + return kRType; + case BRANCH: + return kBType; + case JALR: + return kIType; + case JAL: + return kJType; + case SYSTEM: + return kIType; + case OP_V: + return kVType; + } + } + return kUnsupported; +} + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_RISCV64 diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h new file mode 100644 index 00000000000000..67856b771b05ca --- /dev/null +++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h @@ -0,0 +1,1986 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CODEGEN_RISCV64_CONSTANTS_RISCV64_H_ +#define V8_CODEGEN_RISCV64_CONSTANTS_RISCV64_H_ + +#include "src/base/logging.h" +#include "src/base/macros.h" +#include "src/common/globals.h" +#include "src/flags/flags.h" + +// UNIMPLEMENTED_ macro for RISCV. +#ifdef DEBUG +#define UNIMPLEMENTED_RISCV() \ + v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. 
\n", \ + __FILE__, __LINE__, __func__); +#else +#define UNIMPLEMENTED_RISCV() +#endif + +#define UNSUPPORTED_RISCV() \ + v8::internal::PrintF("Unsupported instruction %d.\n", __LINE__) + +enum Endianness { kLittle, kBig }; + +#if defined(V8_TARGET_LITTLE_ENDIAN) +static const Endianness kArchEndian = kLittle; +#elif defined(V8_TARGET_BIG_ENDIAN) +static const Endianness kArchEndian = kBig; +#else +#error Unknown endianness +#endif + +#if defined(V8_TARGET_LITTLE_ENDIAN) +const uint32_t kLeastSignificantByteInInt32Offset = 0; +const uint32_t kLessSignificantWordInDoublewordOffset = 0; +#elif defined(V8_TARGET_BIG_ENDIAN) +const uint32_t kLeastSignificantByteInInt32Offset = 3; +const uint32_t kLessSignificantWordInDoublewordOffset = 4; +#else +#error Unknown endianness +#endif + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif +#include + +// Defines constants and accessor classes to assemble, disassemble and +// simulate RISC-V instructions. +// +// See: The RISC-V Instruction Set Manual +// Volume I: User-Level ISA +// Try https://content.riscv.org/wp-content/uploads/2017/05/riscv-spec-v2.2.pdf. + +namespace v8 { +namespace internal { + +constexpr size_t kMaxPCRelativeCodeRangeInMB = 4094; + +// ----------------------------------------------------------------------------- +// Registers and FPURegisters. + +// Number of general purpose registers. +const int kNumRegisters = 32; +const int kInvalidRegister = -1; + +// Number of registers with pc. +const int kNumSimuRegisters = 33; + +// In the simulator, the PC register is simulated as the 34th register. +const int kPCRegister = 34; + +// Number coprocessor registers. +const int kNumFPURegisters = 32; +const int kInvalidFPURegister = -1; + +// Number vectotr registers +const int kNumVRegisters = 32; +const int kInvalidVRegister = -1; +// 'pref' instruction hints +const int32_t kPrefHintLoad = 0; +const int32_t kPrefHintStore = 1; +const int32_t kPrefHintLoadStreamed = 4; +const int32_t kPrefHintStoreStreamed = 5; +const int32_t kPrefHintLoadRetained = 6; +const int32_t kPrefHintStoreRetained = 7; +const int32_t kPrefHintWritebackInvalidate = 25; +const int32_t kPrefHintPrepareForStore = 30; + +// Actual value of root register is offset from the root array's start +// to take advantage of negative displacement values. +// TODO(sigurds): Choose best value. +constexpr int kRootRegisterBias = 256; + +// Helper functions for converting between register numbers and names. +class Registers { + public: + // Return the name of the register. + static const char* Name(int reg); + + // Lookup the register number for the name provided. + static int Number(const char* name); + + struct RegisterAlias { + int reg; + const char* name; + }; + + static const int64_t kMaxValue = 0x7fffffffffffffffl; + static const int64_t kMinValue = 0x8000000000000000l; + + private: + static const char* names_[kNumSimuRegisters]; + static const RegisterAlias aliases_[]; +}; + +// Helper functions for converting between register numbers and names. +class FPURegisters { + public: + // Return the name of the register. + static const char* Name(int reg); + + // Lookup the register number for the name provided. + static int Number(const char* name); + + struct RegisterAlias { + int creg; + const char* name; + }; + + private: + static const char* names_[kNumFPURegisters]; + static const RegisterAlias aliases_[]; +}; + +class VRegisters { + public: + // Return the name of the register. 
+ static const char* Name(int reg); + + // Lookup the register number for the name provided. + static int Number(const char* name); + + struct RegisterAlias { + int creg; + const char* name; + }; + + private: + static const char* names_[kNumVRegisters]; + static const RegisterAlias aliases_[]; +}; + +// ----------------------------------------------------------------------------- +// Instructions encoding constants. + +// On RISCV all instructions are 32 bits, except for RVC. +using Instr = int32_t; +using ShortInstr = int16_t; + +// Special Software Interrupt codes when used in the presence of the RISC-V +// simulator. +enum SoftwareInterruptCodes { + // Transition to C code. + call_rt_redirected = 0xfffff +}; + +// On RISC-V Simulator breakpoints can have different codes: +// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints, +// the simulator will run through them and print the registers. +// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop() +// instructions (see Assembler::stop()). +// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the +// debugger. +const uint32_t kMaxWatchpointCode = 31; +const uint32_t kMaxStopCode = 127; +STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode); + +// ----- Fields offset and length. +// RISCV constants +const int kBaseOpcodeShift = 0; +const int kBaseOpcodeBits = 7; +const int kFunct7Shift = 25; +const int kFunct7Bits = 7; +const int kFunct5Shift = 27; +const int kFunct5Bits = 5; +const int kFunct3Shift = 12; +const int kFunct3Bits = 3; +const int kFunct2Shift = 25; +const int kFunct2Bits = 2; +const int kRs1Shift = 15; +const int kRs1Bits = 5; +const int kVs1Shift = 15; +const int kVs1Bits = 5; +const int kVs2Shift = 20; +const int kVs2Bits = 5; +const int kVdShift = 7; +const int kVdBits = 5; +const int kRs2Shift = 20; +const int kRs2Bits = 5; +const int kRs3Shift = 27; +const int kRs3Bits = 5; +const int kRdShift = 7; +const int kRdBits = 5; +const int kRlShift = 25; +const int kAqShift = 26; +const int kImm12Shift = 20; +const int kImm12Bits = 12; +const int kImm11Shift = 2; +const int kImm11Bits = 11; +const int kShamtShift = 20; +const int kShamtBits = 5; +const int kShamtWShift = 20; +const int kShamtWBits = 6; +const int kArithShiftShift = 30; +const int kImm20Shift = 12; +const int kImm20Bits = 20; +const int kCsrShift = 20; +const int kCsrBits = 12; +const int kMemOrderBits = 4; +const int kPredOrderShift = 24; +const int kSuccOrderShift = 20; +// for C extension +const int kRvcFunct4Shift = 12; +const int kRvcFunct4Bits = 4; +const int kRvcFunct3Shift = 13; +const int kRvcFunct3Bits = 3; +const int kRvcRs1Shift = 7; +const int kRvcRs1Bits = 5; +const int kRvcRs2Shift = 2; +const int kRvcRs2Bits = 5; +const int kRvcRdShift = 7; +const int kRvcRdBits = 5; +const int kRvcRs1sShift = 7; +const int kRvcRs1sBits = 3; +const int kRvcRs2sShift = 2; +const int kRvcRs2sBits = 3; +const int kRvcFunct2Shift = 5; +const int kRvcFunct2BShift = 10; +const int kRvcFunct2Bits = 2; +const int kRvcFunct6Shift = 10; +const int kRvcFunct6Bits = 6; + +// for RVV extension +constexpr int kRvvELEN = 64; +constexpr int kRvvVLEN = 128; +constexpr int kRvvSLEN = kRvvVLEN; +const int kRvvFunct6Shift = 26; +const int kRvvFunct6Bits = 6; +const uint32_t kRvvFunct6Mask = + (((1 << kRvvFunct6Bits) - 1) << kRvvFunct6Shift); + +const int kRvvVmBits = 1; +const int kRvvVmShift = 25; +const uint32_t kRvvVmMask = (((1 << kRvvVmBits) - 1) << kRvvVmShift); + +const int kRvvVs2Bits = 5; +const int kRvvVs2Shift 
= 20; +const uint32_t kRvvVs2Mask = (((1 << kRvvVs2Bits) - 1) << kRvvVs2Shift); + +const int kRvvVs1Bits = 5; +const int kRvvVs1Shift = 15; +const uint32_t kRvvVs1Mask = (((1 << kRvvVs1Bits) - 1) << kRvvVs1Shift); + +const int kRvvRs1Bits = kRvvVs1Bits; +const int kRvvRs1Shift = kRvvVs1Shift; +const uint32_t kRvvRs1Mask = (((1 << kRvvRs1Bits) - 1) << kRvvRs1Shift); + +const int kRvvRs2Bits = 5; +const int kRvvRs2Shift = 20; +const uint32_t kRvvRs2Mask = (((1 << kRvvRs2Bits) - 1) << kRvvRs2Shift); + +const int kRvvImm5Bits = kRvvVs1Bits; +const int kRvvImm5Shift = kRvvVs1Shift; +const uint32_t kRvvImm5Mask = (((1 << kRvvImm5Bits) - 1) << kRvvImm5Shift); + +const int kRvvVdBits = 5; +const int kRvvVdShift = 7; +const uint32_t kRvvVdMask = (((1 << kRvvVdBits) - 1) << kRvvVdShift); + +const int kRvvRdBits = kRvvVdBits; +const int kRvvRdShift = kRvvVdShift; +const uint32_t kRvvRdMask = (((1 << kRvvRdBits) - 1) << kRvvRdShift); + +const int kRvvZimmBits = 11; +const int kRvvZimmShift = 20; +const uint32_t kRvvZimmMask = (((1 << kRvvZimmBits) - 1) << kRvvZimmShift); + +const int kRvvUimmShift = kRvvRs1Shift; +const int kRvvUimmBits = kRvvRs1Bits; +const uint32_t kRvvUimmMask = (((1 << kRvvUimmBits) - 1) << kRvvUimmShift); + +const int kRvvWidthBits = 3; +const int kRvvWidthShift = 12; +const uint32_t kRvvWidthMask = (((1 << kRvvWidthBits) - 1) << kRvvWidthShift); + +const int kRvvMopBits = 2; +const int kRvvMopShift = 26; +const uint32_t kRvvMopMask = (((1 << kRvvMopBits) - 1) << kRvvMopShift); + +const int kRvvMewBits = 1; +const int kRvvMewShift = 28; +const uint32_t kRvvMewMask = (((1 << kRvvMewBits) - 1) << kRvvMewShift); + +const int kRvvNfBits = 3; +const int kRvvNfShift = 29; +const uint32_t kRvvNfMask = (((1 << kRvvNfBits) - 1) << kRvvNfShift); + +// RISCV Instruction bit masks +const uint32_t kBaseOpcodeMask = ((1 << kBaseOpcodeBits) - 1) + << kBaseOpcodeShift; +const uint32_t kFunct3Mask = ((1 << kFunct3Bits) - 1) << kFunct3Shift; +const uint32_t kFunct5Mask = ((1 << kFunct5Bits) - 1) << kFunct5Shift; +const uint32_t kFunct7Mask = ((1 << kFunct7Bits) - 1) << kFunct7Shift; +const uint32_t kFunct2Mask = 0b11 << kFunct7Shift; +const uint32_t kRTypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct7Mask; +const uint32_t kRATypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct5Mask; +const uint32_t kRFPTypeMask = kBaseOpcodeMask | kFunct7Mask; +const uint32_t kR4TypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct2Mask; +const uint32_t kITypeMask = kBaseOpcodeMask | kFunct3Mask; +const uint32_t kSTypeMask = kBaseOpcodeMask | kFunct3Mask; +const uint32_t kBTypeMask = kBaseOpcodeMask | kFunct3Mask; +const uint32_t kUTypeMask = kBaseOpcodeMask; +const uint32_t kJTypeMask = kBaseOpcodeMask; +const uint32_t kVTypeMask = kRvvFunct6Mask | kFunct3Mask | kBaseOpcodeMask; +const uint32_t kRs1FieldMask = ((1 << kRs1Bits) - 1) << kRs1Shift; +const uint32_t kRs2FieldMask = ((1 << kRs2Bits) - 1) << kRs2Shift; +const uint32_t kRs3FieldMask = ((1 << kRs3Bits) - 1) << kRs3Shift; +const uint32_t kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift; +const uint32_t kBImm12Mask = kFunct7Mask | kRdFieldMask; +const uint32_t kImm20Mask = ((1 << kImm20Bits) - 1) << kImm20Shift; +const uint32_t kImm12Mask = ((1 << kImm12Bits) - 1) << kImm12Shift; +const uint32_t kImm11Mask = ((1 << kImm11Bits) - 1) << kImm11Shift; +const uint32_t kImm31_12Mask = ((1 << 20) - 1) << 12; +const uint32_t kImm19_0Mask = ((1 << 20) - 1); +const uint32_t kRvcOpcodeMask = + 0b11 | (((1 << kRvcFunct3Bits) - 1) << kRvcFunct3Shift); +const uint32_t 
kRvcFunct3Mask = + (((1 << kRvcFunct3Bits) - 1) << kRvcFunct3Shift); +const uint32_t kRvcFunct4Mask = + (((1 << kRvcFunct4Bits) - 1) << kRvcFunct4Shift); +const uint32_t kRvcFunct6Mask = + (((1 << kRvcFunct6Bits) - 1) << kRvcFunct6Shift); +const uint32_t kRvcFunct2Mask = + (((1 << kRvcFunct2Bits) - 1) << kRvcFunct2Shift); +const uint32_t kRvcFunct2BMask = + (((1 << kRvcFunct2Bits) - 1) << kRvcFunct2BShift); +const uint32_t kCRTypeMask = kRvcOpcodeMask | kRvcFunct4Mask; +const uint32_t kCSTypeMask = kRvcOpcodeMask | kRvcFunct6Mask; +const uint32_t kCATypeMask = kRvcOpcodeMask | kRvcFunct6Mask | kRvcFunct2Mask; +const uint32_t kRvcBImm8Mask = (((1 << 5) - 1) << 2) | (((1 << 3) - 1) << 10); + +// RISCV CSR related bit mask and shift +const int kFcsrFlagsBits = 5; +const uint32_t kFcsrFlagsMask = (1 << kFcsrFlagsBits) - 1; +const int kFcsrFrmBits = 3; +const int kFcsrFrmShift = kFcsrFlagsBits; +const uint32_t kFcsrFrmMask = ((1 << kFcsrFrmBits) - 1) << kFcsrFrmShift; +const int kFcsrBits = kFcsrFlagsBits + kFcsrFrmBits; +const uint32_t kFcsrMask = kFcsrFlagsMask | kFcsrFrmMask; + +const int kNopByte = 0x00000013; +// Original MIPS constants +// TODO(RISCV): to be cleaned up +const int kImm16Shift = 0; +const int kImm16Bits = 16; +const uint32_t kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift; +// end of TODO(RISCV): to be cleaned up + +// ----- RISCV Base Opcodes + +enum BaseOpcode : uint32_t {}; + +// ----- RISC-V Opcodes and Function Fields. +enum Opcode : uint32_t { + LOAD = 0b0000011, // I form: LB LH LW LBU LHU + LOAD_FP = 0b0000111, // I form: FLW FLD FLQ + MISC_MEM = 0b0001111, // I special form: FENCE FENCE.I + OP_IMM = 0b0010011, // I form: ADDI SLTI SLTIU XORI ORI ANDI SLLI SRLI SARI + // Note: SLLI/SRLI/SRAI I form first, then func3 001/101 => R type + AUIPC = 0b0010111, // U form: AUIPC + OP_IMM_32 = 0b0011011, // I form: ADDIW SLLIW SRLIW SRAIW + // Note: SRLIW SRAIW I form first, then func3 101 special shift encoding + STORE = 0b0100011, // S form: SB SH SW SD + STORE_FP = 0b0100111, // S form: FSW FSD FSQ + AMO = 0b0101111, // R form: All A instructions + OP = 0b0110011, // R: ADD SUB SLL SLT SLTU XOR SRL SRA OR AND and 32M set + LUI = 0b0110111, // U form: LUI + OP_32 = 0b0111011, // R: ADDW SUBW SLLW SRLW SRAW MULW DIVW DIVUW REMW REMUW + MADD = 0b1000011, // R4 type: FMADD.S FMADD.D FMADD.Q + MSUB = 0b1000111, // R4 type: FMSUB.S FMSUB.D FMSUB.Q + NMSUB = 0b1001011, // R4 type: FNMSUB.S FNMSUB.D FNMSUB.Q + NMADD = 0b1001111, // R4 type: FNMADD.S FNMADD.D FNMADD.Q + OP_FP = 0b1010011, // R type: Q ext + BRANCH = 0b1100011, // B form: BEQ BNE, BLT, BGE, BLTU BGEU + JALR = 0b1100111, // I form: JALR + JAL = 0b1101111, // J form: JAL + SYSTEM = 0b1110011, // I form: ECALL EBREAK Zicsr ext + // C extension + C0 = 0b00, + C1 = 0b01, + C2 = 0b10, + FUNCT2_0 = 0b00, + FUNCT2_1 = 0b01, + FUNCT2_2 = 0b10, + FUNCT2_3 = 0b11, + + // Note use RO (RiscV Opcode) prefix + // RV32I Base Instruction Set + RO_LUI = LUI, + RO_AUIPC = AUIPC, + RO_JAL = JAL, + RO_JALR = JALR | (0b000 << kFunct3Shift), + RO_BEQ = BRANCH | (0b000 << kFunct3Shift), + RO_BNE = BRANCH | (0b001 << kFunct3Shift), + RO_BLT = BRANCH | (0b100 << kFunct3Shift), + RO_BGE = BRANCH | (0b101 << kFunct3Shift), + RO_BLTU = BRANCH | (0b110 << kFunct3Shift), + RO_BGEU = BRANCH | (0b111 << kFunct3Shift), + RO_LB = LOAD | (0b000 << kFunct3Shift), + RO_LH = LOAD | (0b001 << kFunct3Shift), + RO_LW = LOAD | (0b010 << kFunct3Shift), + RO_LBU = LOAD | (0b100 << kFunct3Shift), + RO_LHU = LOAD | (0b101 << kFunct3Shift), + RO_SB = STORE | 
(0b000 << kFunct3Shift), + RO_SH = STORE | (0b001 << kFunct3Shift), + RO_SW = STORE | (0b010 << kFunct3Shift), + RO_ADDI = OP_IMM | (0b000 << kFunct3Shift), + RO_SLTI = OP_IMM | (0b010 << kFunct3Shift), + RO_SLTIU = OP_IMM | (0b011 << kFunct3Shift), + RO_XORI = OP_IMM | (0b100 << kFunct3Shift), + RO_ORI = OP_IMM | (0b110 << kFunct3Shift), + RO_ANDI = OP_IMM | (0b111 << kFunct3Shift), + RO_SLLI = OP_IMM | (0b001 << kFunct3Shift), + RO_SRLI = OP_IMM | (0b101 << kFunct3Shift), + // RO_SRAI = OP_IMM | (0b101 << kFunct3Shift), // Same as SRLI, use func7 + RO_ADD = OP | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift), + RO_SUB = OP | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift), + RO_SLL = OP | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift), + RO_SLT = OP | (0b010 << kFunct3Shift) | (0b0000000 << kFunct7Shift), + RO_SLTU = OP | (0b011 << kFunct3Shift) | (0b0000000 << kFunct7Shift), + RO_XOR = OP | (0b100 << kFunct3Shift) | (0b0000000 << kFunct7Shift), + RO_SRL = OP | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift), + RO_SRA = OP | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift), + RO_OR = OP | (0b110 << kFunct3Shift) | (0b0000000 << kFunct7Shift), + RO_AND = OP | (0b111 << kFunct3Shift) | (0b0000000 << kFunct7Shift), + RO_FENCE = MISC_MEM | (0b000 << kFunct3Shift), + RO_ECALL = SYSTEM | (0b000 << kFunct3Shift), + // RO_EBREAK = SYSTEM | (0b000 << kFunct3Shift), // Same as ECALL, use imm12 + + // RV64I Base Instruction Set (in addition to RV32I) + RO_LWU = LOAD | (0b110 << kFunct3Shift), + RO_LD = LOAD | (0b011 << kFunct3Shift), + RO_SD = STORE | (0b011 << kFunct3Shift), + RO_ADDIW = OP_IMM_32 | (0b000 << kFunct3Shift), + RO_SLLIW = OP_IMM_32 | (0b001 << kFunct3Shift), + RO_SRLIW = OP_IMM_32 | (0b101 << kFunct3Shift), + // RO_SRAIW = OP_IMM_32 | (0b101 << kFunct3Shift), // Same as SRLIW, use func7 + RO_ADDW = OP_32 | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift), + RO_SUBW = OP_32 | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift), + RO_SLLW = OP_32 | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift), + RO_SRLW = OP_32 | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift), + RO_SRAW = OP_32 | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift), + + // RV32/RV64 Zifencei Standard Extension + RO_FENCE_I = MISC_MEM | (0b001 << kFunct3Shift), + + // RV32/RV64 Zicsr Standard Extension + RO_CSRRW = SYSTEM | (0b001 << kFunct3Shift), + RO_CSRRS = SYSTEM | (0b010 << kFunct3Shift), + RO_CSRRC = SYSTEM | (0b011 << kFunct3Shift), + RO_CSRRWI = SYSTEM | (0b101 << kFunct3Shift), + RO_CSRRSI = SYSTEM | (0b110 << kFunct3Shift), + RO_CSRRCI = SYSTEM | (0b111 << kFunct3Shift), + + // RV32M Standard Extension + RO_MUL = OP | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + RO_MULH = OP | (0b001 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + RO_MULHSU = OP | (0b010 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + RO_MULHU = OP | (0b011 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + RO_DIV = OP | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + RO_DIVU = OP | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + RO_REM = OP | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + RO_REMU = OP | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + + // RV64M Standard Extension (in addition to RV32M) + RO_MULW = OP_32 | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + RO_DIVW = OP_32 | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + RO_DIVUW = OP_32 | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + RO_REMW = 
OP_32 | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + RO_REMUW = OP_32 | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift), + + // RV32A Standard Extension + RO_LR_W = AMO | (0b010 << kFunct3Shift) | (0b00010 << kFunct5Shift), + RO_SC_W = AMO | (0b010 << kFunct3Shift) | (0b00011 << kFunct5Shift), + RO_AMOSWAP_W = AMO | (0b010 << kFunct3Shift) | (0b00001 << kFunct5Shift), + RO_AMOADD_W = AMO | (0b010 << kFunct3Shift) | (0b00000 << kFunct5Shift), + RO_AMOXOR_W = AMO | (0b010 << kFunct3Shift) | (0b00100 << kFunct5Shift), + RO_AMOAND_W = AMO | (0b010 << kFunct3Shift) | (0b01100 << kFunct5Shift), + RO_AMOOR_W = AMO | (0b010 << kFunct3Shift) | (0b01000 << kFunct5Shift), + RO_AMOMIN_W = AMO | (0b010 << kFunct3Shift) | (0b10000 << kFunct5Shift), + RO_AMOMAX_W = AMO | (0b010 << kFunct3Shift) | (0b10100 << kFunct5Shift), + RO_AMOMINU_W = AMO | (0b010 << kFunct3Shift) | (0b11000 << kFunct5Shift), + RO_AMOMAXU_W = AMO | (0b010 << kFunct3Shift) | (0b11100 << kFunct5Shift), + + // RV64A Standard Extension (in addition to RV32A) + RO_LR_D = AMO | (0b011 << kFunct3Shift) | (0b00010 << kFunct5Shift), + RO_SC_D = AMO | (0b011 << kFunct3Shift) | (0b00011 << kFunct5Shift), + RO_AMOSWAP_D = AMO | (0b011 << kFunct3Shift) | (0b00001 << kFunct5Shift), + RO_AMOADD_D = AMO | (0b011 << kFunct3Shift) | (0b00000 << kFunct5Shift), + RO_AMOXOR_D = AMO | (0b011 << kFunct3Shift) | (0b00100 << kFunct5Shift), + RO_AMOAND_D = AMO | (0b011 << kFunct3Shift) | (0b01100 << kFunct5Shift), + RO_AMOOR_D = AMO | (0b011 << kFunct3Shift) | (0b01000 << kFunct5Shift), + RO_AMOMIN_D = AMO | (0b011 << kFunct3Shift) | (0b10000 << kFunct5Shift), + RO_AMOMAX_D = AMO | (0b011 << kFunct3Shift) | (0b10100 << kFunct5Shift), + RO_AMOMINU_D = AMO | (0b011 << kFunct3Shift) | (0b11000 << kFunct5Shift), + RO_AMOMAXU_D = AMO | (0b011 << kFunct3Shift) | (0b11100 << kFunct5Shift), + + // RV32F Standard Extension + RO_FLW = LOAD_FP | (0b010 << kFunct3Shift), + RO_FSW = STORE_FP | (0b010 << kFunct3Shift), + RO_FMADD_S = MADD | (0b00 << kFunct2Shift), + RO_FMSUB_S = MSUB | (0b00 << kFunct2Shift), + RO_FNMSUB_S = NMSUB | (0b00 << kFunct2Shift), + RO_FNMADD_S = NMADD | (0b00 << kFunct2Shift), + RO_FADD_S = OP_FP | (0b0000000 << kFunct7Shift), + RO_FSUB_S = OP_FP | (0b0000100 << kFunct7Shift), + RO_FMUL_S = OP_FP | (0b0001000 << kFunct7Shift), + RO_FDIV_S = OP_FP | (0b0001100 << kFunct7Shift), + RO_FSQRT_S = OP_FP | (0b0101100 << kFunct7Shift) | (0b00000 << kRs2Shift), + RO_FSGNJ_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010000 << kFunct7Shift), + RO_FSGNJN_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010000 << kFunct7Shift), + RO_FSQNJX_S = OP_FP | (0b010 << kFunct3Shift) | (0b0010000 << kFunct7Shift), + RO_FMIN_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010100 << kFunct7Shift), + RO_FMAX_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010100 << kFunct7Shift), + RO_FCVT_W_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00000 << kRs2Shift), + RO_FCVT_WU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00001 << kRs2Shift), + RO_FMV = OP_FP | (0b1110000 << kFunct7Shift) | (0b000 << kFunct3Shift) | + (0b00000 << kRs2Shift), + RO_FEQ_S = OP_FP | (0b010 << kFunct3Shift) | (0b1010000 << kFunct7Shift), + RO_FLT_S = OP_FP | (0b001 << kFunct3Shift) | (0b1010000 << kFunct7Shift), + RO_FLE_S = OP_FP | (0b000 << kFunct3Shift) | (0b1010000 << kFunct7Shift), + RO_FCLASS_S = OP_FP | (0b001 << kFunct3Shift) | (0b1110000 << kFunct7Shift), + RO_FCVT_S_W = OP_FP | (0b1101000 << kFunct7Shift) | (0b00000 << kRs2Shift), + RO_FCVT_S_WU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00001 
<< kRs2Shift), + RO_FMV_W_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111000 << kFunct7Shift), + + // RV64F Standard Extension (in addition to RV32F) + RO_FCVT_L_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00010 << kRs2Shift), + RO_FCVT_LU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00011 << kRs2Shift), + RO_FCVT_S_L = OP_FP | (0b1101000 << kFunct7Shift) | (0b00010 << kRs2Shift), + RO_FCVT_S_LU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00011 << kRs2Shift), + + // RV32D Standard Extension + RO_FLD = LOAD_FP | (0b011 << kFunct3Shift), + RO_FSD = STORE_FP | (0b011 << kFunct3Shift), + RO_FMADD_D = MADD | (0b01 << kFunct2Shift), + RO_FMSUB_D = MSUB | (0b01 << kFunct2Shift), + RO_FNMSUB_D = NMSUB | (0b01 << kFunct2Shift), + RO_FNMADD_D = NMADD | (0b01 << kFunct2Shift), + RO_FADD_D = OP_FP | (0b0000001 << kFunct7Shift), + RO_FSUB_D = OP_FP | (0b0000101 << kFunct7Shift), + RO_FMUL_D = OP_FP | (0b0001001 << kFunct7Shift), + RO_FDIV_D = OP_FP | (0b0001101 << kFunct7Shift), + RO_FSQRT_D = OP_FP | (0b0101101 << kFunct7Shift) | (0b00000 << kRs2Shift), + RO_FSGNJ_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010001 << kFunct7Shift), + RO_FSGNJN_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010001 << kFunct7Shift), + RO_FSQNJX_D = OP_FP | (0b010 << kFunct3Shift) | (0b0010001 << kFunct7Shift), + RO_FMIN_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010101 << kFunct7Shift), + RO_FMAX_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010101 << kFunct7Shift), + RO_FCVT_S_D = OP_FP | (0b0100000 << kFunct7Shift) | (0b00001 << kRs2Shift), + RO_FCVT_D_S = OP_FP | (0b0100001 << kFunct7Shift) | (0b00000 << kRs2Shift), + RO_FEQ_D = OP_FP | (0b010 << kFunct3Shift) | (0b1010001 << kFunct7Shift), + RO_FLT_D = OP_FP | (0b001 << kFunct3Shift) | (0b1010001 << kFunct7Shift), + RO_FLE_D = OP_FP | (0b000 << kFunct3Shift) | (0b1010001 << kFunct7Shift), + RO_FCLASS_D = OP_FP | (0b001 << kFunct3Shift) | (0b1110001 << kFunct7Shift) | + (0b00000 << kRs2Shift), + RO_FCVT_W_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00000 << kRs2Shift), + RO_FCVT_WU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00001 << kRs2Shift), + RO_FCVT_D_W = OP_FP | (0b1101001 << kFunct7Shift) | (0b00000 << kRs2Shift), + RO_FCVT_D_WU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00001 << kRs2Shift), + + // RV64D Standard Extension (in addition to RV32D) + RO_FCVT_L_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00010 << kRs2Shift), + RO_FCVT_LU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00011 << kRs2Shift), + RO_FMV_X_D = OP_FP | (0b000 << kFunct3Shift) | (0b1110001 << kFunct7Shift) | + (0b00000 << kRs2Shift), + RO_FCVT_D_L = OP_FP | (0b1101001 << kFunct7Shift) | (0b00010 << kRs2Shift), + RO_FCVT_D_LU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00011 << kRs2Shift), + RO_FMV_D_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111001 << kFunct7Shift) | + (0b00000 << kRs2Shift), + + // RV64C Standard Extension + RO_C_ADDI4SPN = C0 | (0b000 << kRvcFunct3Shift), + RO_C_FLD = C0 | (0b001 << kRvcFunct3Shift), + RO_C_LW = C0 | (0b010 << kRvcFunct3Shift), + RO_C_LD = C0 | (0b011 << kRvcFunct3Shift), + RO_C_FSD = C0 | (0b101 << kRvcFunct3Shift), + RO_C_SW = C0 | (0b110 << kRvcFunct3Shift), + RO_C_SD = C0 | (0b111 << kRvcFunct3Shift), + RO_C_NOP_ADDI = C1 | (0b000 << kRvcFunct3Shift), + RO_C_ADDIW = C1 | (0b001 << kRvcFunct3Shift), + RO_C_LI = C1 | (0b010 << kRvcFunct3Shift), + RO_C_SUB = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift), + RO_C_XOR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift), + RO_C_OR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_2 << 
kRvcFunct2Shift), + RO_C_AND = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_3 << kRvcFunct2Shift), + RO_C_SUBW = + C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift), + RO_C_ADDW = + C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift), + RO_C_LUI_ADD = C1 | (0b011 << kRvcFunct3Shift), + RO_C_MISC_ALU = C1 | (0b100 << kRvcFunct3Shift), + RO_C_J = C1 | (0b101 << kRvcFunct3Shift), + RO_C_BEQZ = C1 | (0b110 << kRvcFunct3Shift), + RO_C_BNEZ = C1 | (0b111 << kRvcFunct3Shift), + RO_C_SLLI = C2 | (0b000 << kRvcFunct3Shift), + RO_C_FLDSP = C2 | (0b001 << kRvcFunct3Shift), + RO_C_LWSP = C2 | (0b010 << kRvcFunct3Shift), + RO_C_LDSP = C2 | (0b011 << kRvcFunct3Shift), + RO_C_JR_MV_ADD = C2 | (0b100 << kRvcFunct3Shift), + RO_C_JR = C2 | (0b1000 << kRvcFunct4Shift), + RO_C_MV = C2 | (0b1000 << kRvcFunct4Shift), + RO_C_EBREAK = C2 | (0b1001 << kRvcFunct4Shift), + RO_C_JALR = C2 | (0b1001 << kRvcFunct4Shift), + RO_C_ADD = C2 | (0b1001 << kRvcFunct4Shift), + RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift), + RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift), + RO_C_SDSP = C2 | (0b111 << kRvcFunct3Shift), + + // RVV Extension + OP_V = 0b1010111, + OP_IVV = OP_V | (0b000 << kFunct3Shift), + OP_FVV = OP_V | (0b001 << kFunct3Shift), + OP_MVV = OP_V | (0b010 << kFunct3Shift), + OP_IVI = OP_V | (0b011 << kFunct3Shift), + OP_IVX = OP_V | (0b100 << kFunct3Shift), + OP_FVF = OP_V | (0b101 << kFunct3Shift), + OP_MVX = OP_V | (0b110 << kFunct3Shift), + + RO_V_VSETVLI = OP_V | (0b111 << kFunct3Shift) | 0b0 << 31, + RO_V_VSETIVLI = OP_V | (0b111 << kFunct3Shift) | 0b11 << 30, + RO_V_VSETVL = OP_V | (0b111 << kFunct3Shift) | 0b1 << 31, + + // RVV LOAD/STORE + RO_V_VL = LOAD_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift), + RO_V_VLS = LOAD_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift), + RO_V_VLX = LOAD_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift), + + RO_V_VS = STORE_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift), + RO_V_VSS = STORE_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift), + RO_V_VSX = STORE_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift), + RO_V_VSU = STORE_FP | (0b01 << kRvvMopShift) | (0b000 << kRvvNfShift), + // THE kFunct6Shift is mop + RO_V_VLSEG2 = LOAD_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift), + RO_V_VLSEG3 = LOAD_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift), + RO_V_VLSEG4 = LOAD_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift), + RO_V_VLSEG5 = LOAD_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift), + RO_V_VLSEG6 = LOAD_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift), + RO_V_VLSEG7 = LOAD_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift), + RO_V_VLSEG8 = LOAD_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift), + + RO_V_VSSEG2 = STORE_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift), + RO_V_VSSEG3 = STORE_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift), + RO_V_VSSEG4 = STORE_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift), + RO_V_VSSEG5 = STORE_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift), + RO_V_VSSEG6 = STORE_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift), + RO_V_VSSEG7 = STORE_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift), + RO_V_VSSEG8 = STORE_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift), + + RO_V_VLSSEG2 = LOAD_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift), + RO_V_VLSSEG3 = LOAD_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift), + RO_V_VLSSEG4 = LOAD_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift), + RO_V_VLSSEG5 = LOAD_FP | (0b10 << kRvvMopShift) | 
(0b100 << kRvvNfShift), + RO_V_VLSSEG6 = LOAD_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift), + RO_V_VLSSEG7 = LOAD_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift), + RO_V_VLSSEG8 = LOAD_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift), + + RO_V_VSSSEG2 = STORE_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift), + RO_V_VSSSEG3 = STORE_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift), + RO_V_VSSSEG4 = STORE_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift), + RO_V_VSSSEG5 = STORE_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift), + RO_V_VSSSEG6 = STORE_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift), + RO_V_VSSSEG7 = STORE_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift), + RO_V_VSSSEG8 = STORE_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift), + + RO_V_VLXSEG2 = LOAD_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift), + RO_V_VLXSEG3 = LOAD_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift), + RO_V_VLXSEG4 = LOAD_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift), + RO_V_VLXSEG5 = LOAD_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift), + RO_V_VLXSEG6 = LOAD_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift), + RO_V_VLXSEG7 = LOAD_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift), + RO_V_VLXSEG8 = LOAD_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift), + + RO_V_VSXSEG2 = STORE_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift), + RO_V_VSXSEG3 = STORE_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift), + RO_V_VSXSEG4 = STORE_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift), + RO_V_VSXSEG5 = STORE_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift), + RO_V_VSXSEG6 = STORE_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift), + RO_V_VSXSEG7 = STORE_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift), + RO_V_VSXSEG8 = STORE_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift), + + // RVV Vector Arithmetic Instruction + VADD_FUNCT6 = 0b000000, + RO_V_VADD_VI = OP_IVI | (VADD_FUNCT6 << kRvvFunct6Shift), + RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift), + RO_V_VADD_VX = OP_IVX | (VADD_FUNCT6 << kRvvFunct6Shift), + + VSUB_FUNCT6 = 0b000010, + RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift), + RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift), + + VDIVU_FUNCT6 = 0b100000, + RO_V_VDIVU_VX = OP_MVX | (VDIVU_FUNCT6 << kRvvFunct6Shift), + RO_V_VDIVU_VV = OP_MVV | (VDIVU_FUNCT6 << kRvvFunct6Shift), + + VDIV_FUNCT6 = 0b100001, + RO_V_VDIV_VX = OP_MVX | (VDIV_FUNCT6 << kRvvFunct6Shift), + RO_V_VDIV_VV = OP_MVV | (VDIV_FUNCT6 << kRvvFunct6Shift), + + VREMU_FUNCT6 = 0b100010, + RO_V_VREMU_VX = OP_MVX | (VREMU_FUNCT6 << kRvvFunct6Shift), + RO_V_VREMU_VV = OP_MVV | (VREMU_FUNCT6 << kRvvFunct6Shift), + + VREM_FUNCT6 = 0b100011, + RO_V_VREM_VX = OP_MVX | (VREM_FUNCT6 << kRvvFunct6Shift), + RO_V_VREM_VV = OP_MVV | (VREM_FUNCT6 << kRvvFunct6Shift), + + VMULHU_FUNCT6 = 0b100100, + RO_V_VMULHU_VX = OP_MVX | (VMULHU_FUNCT6 << kRvvFunct6Shift), + RO_V_VMULHU_VV = OP_MVV | (VMULHU_FUNCT6 << kRvvFunct6Shift), + + VMUL_FUNCT6 = 0b100101, + RO_V_VMUL_VX = OP_MVX | (VMUL_FUNCT6 << kRvvFunct6Shift), + RO_V_VMUL_VV = OP_MVV | (VMUL_FUNCT6 << kRvvFunct6Shift), + + VWMUL_FUNCT6 = 0b111011, + RO_V_VWMUL_VX = OP_MVX | (VWMUL_FUNCT6 << kRvvFunct6Shift), + RO_V_VWMUL_VV = OP_MVV | (VWMUL_FUNCT6 << kRvvFunct6Shift), + + VWMULU_FUNCT6 = 0b111000, + RO_V_VWMULU_VX = OP_MVX | (VWMULU_FUNCT6 << kRvvFunct6Shift), + RO_V_VWMULU_VV = OP_MVV | (VWMULU_FUNCT6 << kRvvFunct6Shift), + + VMULHSU_FUNCT6 = 0b100110, + RO_V_VMULHSU_VX = OP_MVX | 
(VMULHSU_FUNCT6 << kRvvFunct6Shift), + RO_V_VMULHSU_VV = OP_MVV | (VMULHSU_FUNCT6 << kRvvFunct6Shift), + + VMULH_FUNCT6 = 0b100111, + RO_V_VMULH_VX = OP_MVX | (VMULH_FUNCT6 << kRvvFunct6Shift), + RO_V_VMULH_VV = OP_MVV | (VMULH_FUNCT6 << kRvvFunct6Shift), + + VWADD_FUNCT6 = 0b110001, + RO_V_VWADD_VV = OP_MVV | (VWADD_FUNCT6 << kRvvFunct6Shift), + RO_V_VWADD_VX = OP_MVX | (VWADD_FUNCT6 << kRvvFunct6Shift), + + VWADDU_FUNCT6 = 0b110000, + RO_V_VWADDU_VV = OP_MVV | (VWADDU_FUNCT6 << kRvvFunct6Shift), + RO_V_VWADDU_VX = OP_MVX | (VWADDU_FUNCT6 << kRvvFunct6Shift), + + VWADDUW_FUNCT6 = 0b110101, + RO_V_VWADDUW_VX = OP_MVX | (VWADDUW_FUNCT6 << kRvvFunct6Shift), + RO_V_VWADDUW_VV = OP_MVV | (VWADDUW_FUNCT6 << kRvvFunct6Shift), + + VCOMPRESS_FUNCT6 = 0b010111, + RO_V_VCOMPRESS_VV = OP_MVV | (VCOMPRESS_FUNCT6 << kRvvFunct6Shift), + + VSADDU_FUNCT6 = 0b100000, + RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift), + RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift), + RO_V_VSADDU_VX = OP_IVX | (VSADDU_FUNCT6 << kRvvFunct6Shift), + + VSADD_FUNCT6 = 0b100001, + RO_V_VSADD_VI = OP_IVI | (VSADD_FUNCT6 << kRvvFunct6Shift), + RO_V_VSADD_VV = OP_IVV | (VSADD_FUNCT6 << kRvvFunct6Shift), + RO_V_VSADD_VX = OP_IVX | (VSADD_FUNCT6 << kRvvFunct6Shift), + + VSSUB_FUNCT6 = 0b100011, + RO_V_VSSUB_VV = OP_IVV | (VSSUB_FUNCT6 << kRvvFunct6Shift), + RO_V_VSSUB_VX = OP_IVX | (VSSUB_FUNCT6 << kRvvFunct6Shift), + + VSSUBU_FUNCT6 = 0b100010, + RO_V_VSSUBU_VV = OP_IVV | (VSSUBU_FUNCT6 << kRvvFunct6Shift), + RO_V_VSSUBU_VX = OP_IVX | (VSSUBU_FUNCT6 << kRvvFunct6Shift), + + VRSUB_FUNCT6 = 0b000011, + RO_V_VRSUB_VX = OP_IVX | (VRSUB_FUNCT6 << kRvvFunct6Shift), + RO_V_VRSUB_VI = OP_IVI | (VRSUB_FUNCT6 << kRvvFunct6Shift), + + VMINU_FUNCT6 = 0b000100, + RO_V_VMINU_VX = OP_IVX | (VMINU_FUNCT6 << kRvvFunct6Shift), + RO_V_VMINU_VV = OP_IVV | (VMINU_FUNCT6 << kRvvFunct6Shift), + + VMIN_FUNCT6 = 0b000101, + RO_V_VMIN_VX = OP_IVX | (VMIN_FUNCT6 << kRvvFunct6Shift), + RO_V_VMIN_VV = OP_IVV | (VMIN_FUNCT6 << kRvvFunct6Shift), + + VMAXU_FUNCT6 = 0b000110, + RO_V_VMAXU_VX = OP_IVX | (VMAXU_FUNCT6 << kRvvFunct6Shift), + RO_V_VMAXU_VV = OP_IVV | (VMAXU_FUNCT6 << kRvvFunct6Shift), + + VMAX_FUNCT6 = 0b000111, + RO_V_VMAX_VX = OP_IVX | (VMAX_FUNCT6 << kRvvFunct6Shift), + RO_V_VMAX_VV = OP_IVV | (VMAX_FUNCT6 << kRvvFunct6Shift), + + VAND_FUNCT6 = 0b001001, + RO_V_VAND_VI = OP_IVI | (VAND_FUNCT6 << kRvvFunct6Shift), + RO_V_VAND_VV = OP_IVV | (VAND_FUNCT6 << kRvvFunct6Shift), + RO_V_VAND_VX = OP_IVX | (VAND_FUNCT6 << kRvvFunct6Shift), + + VOR_FUNCT6 = 0b001010, + RO_V_VOR_VI = OP_IVI | (VOR_FUNCT6 << kRvvFunct6Shift), + RO_V_VOR_VV = OP_IVV | (VOR_FUNCT6 << kRvvFunct6Shift), + RO_V_VOR_VX = OP_IVX | (VOR_FUNCT6 << kRvvFunct6Shift), + + VXOR_FUNCT6 = 0b001011, + RO_V_VXOR_VI = OP_IVI | (VXOR_FUNCT6 << kRvvFunct6Shift), + RO_V_VXOR_VV = OP_IVV | (VXOR_FUNCT6 << kRvvFunct6Shift), + RO_V_VXOR_VX = OP_IVX | (VXOR_FUNCT6 << kRvvFunct6Shift), + + VRGATHER_FUNCT6 = 0b001100, + RO_V_VRGATHER_VI = OP_IVI | (VRGATHER_FUNCT6 << kRvvFunct6Shift), + RO_V_VRGATHER_VV = OP_IVV | (VRGATHER_FUNCT6 << kRvvFunct6Shift), + RO_V_VRGATHER_VX = OP_IVX | (VRGATHER_FUNCT6 << kRvvFunct6Shift), + + VMV_FUNCT6 = 0b010111, + RO_V_VMV_VI = OP_IVI | (VMV_FUNCT6 << kRvvFunct6Shift), + RO_V_VMV_VV = OP_IVV | (VMV_FUNCT6 << kRvvFunct6Shift), + RO_V_VMV_VX = OP_IVX | (VMV_FUNCT6 << kRvvFunct6Shift), + RO_V_VFMV_VF = OP_FVF | (VMV_FUNCT6 << kRvvFunct6Shift), + + RO_V_VMERGE_VI = RO_V_VMV_VI, + RO_V_VMERGE_VV = RO_V_VMV_VV, + RO_V_VMERGE_VX = RO_V_VMV_VX, + + 
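+  // Worked decomposition (illustrative comment only, using just the
+  // constants defined above): RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 <<
+  // kRvvFunct6Shift) expands to
+  //   0b1010111                  OP_V major opcode, bits [6:0]
+  //   | (0b000 << kFunct3Shift)  IVV operand form, bits [14:12]
+  //   | (0b000000 << 26)         vadd funct6, bits [31:26]
+  // i.e. funct6 selects the operation, funct3 selects the operand kind
+  // (VV/VX/VI), and the major opcode stays OP_V for the arithmetic forms.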
VMSEQ_FUNCT6 = 0b011000, + RO_V_VMSEQ_VI = OP_IVI | (VMSEQ_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSEQ_VV = OP_IVV | (VMSEQ_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSEQ_VX = OP_IVX | (VMSEQ_FUNCT6 << kRvvFunct6Shift), + + VMSNE_FUNCT6 = 0b011001, + RO_V_VMSNE_VI = OP_IVI | (VMSNE_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSNE_VV = OP_IVV | (VMSNE_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSNE_VX = OP_IVX | (VMSNE_FUNCT6 << kRvvFunct6Shift), + + VMSLTU_FUNCT6 = 0b011010, + RO_V_VMSLTU_VV = OP_IVV | (VMSLTU_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSLTU_VX = OP_IVX | (VMSLTU_FUNCT6 << kRvvFunct6Shift), + + VMSLT_FUNCT6 = 0b011011, + RO_V_VMSLT_VV = OP_IVV | (VMSLT_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSLT_VX = OP_IVX | (VMSLT_FUNCT6 << kRvvFunct6Shift), + + VMSLE_FUNCT6 = 0b011101, + RO_V_VMSLE_VI = OP_IVI | (VMSLE_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSLE_VV = OP_IVV | (VMSLE_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSLE_VX = OP_IVX | (VMSLE_FUNCT6 << kRvvFunct6Shift), + + VMSLEU_FUNCT6 = 0b011100, + RO_V_VMSLEU_VI = OP_IVI | (VMSLEU_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSLEU_VV = OP_IVV | (VMSLEU_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSLEU_VX = OP_IVX | (VMSLEU_FUNCT6 << kRvvFunct6Shift), + + VMSGTU_FUNCT6 = 0b011110, + RO_V_VMSGTU_VI = OP_IVI | (VMSGTU_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSGTU_VX = OP_IVX | (VMSGTU_FUNCT6 << kRvvFunct6Shift), + + VMSGT_FUNCT6 = 0b011111, + RO_V_VMSGT_VI = OP_IVI | (VMSGT_FUNCT6 << kRvvFunct6Shift), + RO_V_VMSGT_VX = OP_IVX | (VMSGT_FUNCT6 << kRvvFunct6Shift), + + VSLIDEUP_FUNCT6 = 0b001110, + RO_V_VSLIDEUP_VI = OP_IVI | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift), + RO_V_VSLIDEUP_VX = OP_IVX | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift), + + VSLIDEDOWN_FUNCT6 = 0b001111, + RO_V_VSLIDEDOWN_VI = OP_IVI | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift), + RO_V_VSLIDEDOWN_VX = OP_IVX | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift), + + VSRL_FUNCT6 = 0b101000, + RO_V_VSRL_VI = OP_IVI | (VSRL_FUNCT6 << kRvvFunct6Shift), + RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift), + RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift), + + VSRA_FUNCT6 = 0b101001, + RO_V_VSRA_VI = OP_IVI | (VSRA_FUNCT6 << kRvvFunct6Shift), + RO_V_VSRA_VV = OP_IVV | (VSRA_FUNCT6 << kRvvFunct6Shift), + RO_V_VSRA_VX = OP_IVX | (VSRA_FUNCT6 << kRvvFunct6Shift), + + VSLL_FUNCT6 = 0b100101, + RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift), + RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift), + RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift), + + VSMUL_FUNCT6 = 0b100111, + RO_V_VSMUL_VV = OP_IVV | (VSMUL_FUNCT6 << kRvvFunct6Shift), + RO_V_VSMUL_VX = OP_IVX | (VSMUL_FUNCT6 << kRvvFunct6Shift), + + VADC_FUNCT6 = 0b010000, + RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift), + RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift), + RO_V_VADC_VX = OP_IVX | (VADC_FUNCT6 << kRvvFunct6Shift), + + VMADC_FUNCT6 = 0b010001, + RO_V_VMADC_VI = OP_IVI | (VMADC_FUNCT6 << kRvvFunct6Shift), + RO_V_VMADC_VV = OP_IVV | (VMADC_FUNCT6 << kRvvFunct6Shift), + RO_V_VMADC_VX = OP_IVX | (VMADC_FUNCT6 << kRvvFunct6Shift), + + VWXUNARY0_FUNCT6 = 0b010000, + VRXUNARY0_FUNCT6 = 0b010000, + VMUNARY0_FUNCT6 = 0b010100, + + RO_V_VWXUNARY0 = OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift), + RO_V_VRXUNARY0 = OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift), + RO_V_VMUNARY0 = OP_MVV | (VMUNARY0_FUNCT6 << kRvvFunct6Shift), + + VID_V = 0b10001, + + VXUNARY0_FUNCT6 = 0b010010, + RO_V_VXUNARY0 = OP_MVV | (VXUNARY0_FUNCT6 << kRvvFunct6Shift), + + VWFUNARY0_FUNCT6 = 0b010000, + RO_V_VFMV_FS = OP_FVV | (VWFUNARY0_FUNCT6 << kRvvFunct6Shift), + + 
VRFUNARY0_FUNCT6 = 0b010000, + RO_V_VFMV_SF = OP_FVF | (VRFUNARY0_FUNCT6 << kRvvFunct6Shift), + + VREDMAXU_FUNCT6 = 0b000110, + RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift), + VREDMAX_FUNCT6 = 0b000111, + RO_V_VREDMAX = OP_MVV | (VREDMAX_FUNCT6 << kRvvFunct6Shift), + + VREDMINU_FUNCT6 = 0b000100, + RO_V_VREDMINU = OP_MVV | (VREDMINU_FUNCT6 << kRvvFunct6Shift), + VREDMIN_FUNCT6 = 0b000101, + RO_V_VREDMIN = OP_MVV | (VREDMIN_FUNCT6 << kRvvFunct6Shift), + + VFUNARY0_FUNCT6 = 0b010010, + RO_V_VFUNARY0 = OP_FVV | (VFUNARY0_FUNCT6 << kRvvFunct6Shift), + VFUNARY1_FUNCT6 = 0b010011, + RO_V_VFUNARY1 = OP_FVV | (VFUNARY1_FUNCT6 << kRvvFunct6Shift), + + VFCVT_XU_F_V = 0b00000, + VFCVT_X_F_V = 0b00001, + VFCVT_F_XU_V = 0b00010, + VFCVT_F_X_V = 0b00011, + VFWCVT_XU_F_V = 0b01000, + VFWCVT_X_F_V = 0b01001, + VFWCVT_F_XU_V = 0b01010, + VFWCVT_F_X_V = 0b01011, + VFWCVT_F_F_V = 0b01100, + VFNCVT_F_F_W = 0b10100, + VFNCVT_X_F_W = 0b10001, + VFNCVT_XU_F_W = 0b10000, + + VFCLASS_V = 0b10000, + VFSQRT_V = 0b00000, + VFRSQRT7_V = 0b00100, + VFREC7_V = 0b00101, + + VFADD_FUNCT6 = 0b000000, + RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift), + RO_V_VFADD_VF = OP_FVF | (VFADD_FUNCT6 << kRvvFunct6Shift), + + VFSUB_FUNCT6 = 0b000010, + RO_V_VFSUB_VV = OP_FVV | (VFSUB_FUNCT6 << kRvvFunct6Shift), + RO_V_VFSUB_VF = OP_FVF | (VFSUB_FUNCT6 << kRvvFunct6Shift), + + VFDIV_FUNCT6 = 0b100000, + RO_V_VFDIV_VV = OP_FVV | (VFDIV_FUNCT6 << kRvvFunct6Shift), + RO_V_VFDIV_VF = OP_FVF | (VFDIV_FUNCT6 << kRvvFunct6Shift), + + VFMUL_FUNCT6 = 0b100100, + RO_V_VFMUL_VV = OP_FVV | (VFMUL_FUNCT6 << kRvvFunct6Shift), + RO_V_VFMUL_VF = OP_FVF | (VFMUL_FUNCT6 << kRvvFunct6Shift), + + // Vector Widening Floating-Point Add/Subtract Instructions + VFWADD_FUNCT6 = 0b110000, + RO_V_VFWADD_VV = OP_FVV | (VFWADD_FUNCT6 << kRvvFunct6Shift), + RO_V_VFWADD_VF = OP_FVF | (VFWADD_FUNCT6 << kRvvFunct6Shift), + + VFWSUB_FUNCT6 = 0b110010, + RO_V_VFWSUB_VV = OP_FVV | (VFWSUB_FUNCT6 << kRvvFunct6Shift), + RO_V_VFWSUB_VF = OP_FVF | (VFWSUB_FUNCT6 << kRvvFunct6Shift), + + VFWADD_W_FUNCT6 = 0b110100, + RO_V_VFWADD_W_VV = OP_FVV | (VFWADD_W_FUNCT6 << kRvvFunct6Shift), + RO_V_VFWADD_W_VF = OP_FVF | (VFWADD_W_FUNCT6 << kRvvFunct6Shift), + + VFWSUB_W_FUNCT6 = 0b110110, + RO_V_VFWSUB_W_VV = OP_FVV | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift), + RO_V_VFWSUB_W_VF = OP_FVF | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift), + + // Vector Widening Floating-Point Reduction Instructions + VFWREDUSUM_FUNCT6 = 0b110001, + RO_V_VFWREDUSUM_VV = OP_FVV | (VFWREDUSUM_FUNCT6 << kRvvFunct6Shift), + + VFWREDOSUM_FUNCT6 = 0b110011, + RO_V_VFWREDOSUM_VV = OP_FVV | (VFWREDOSUM_FUNCT6 << kRvvFunct6Shift), + + // Vector Widening Floating-Point Multiply + VFWMUL_FUNCT6 = 0b111000, + RO_V_VFWMUL_VV = OP_FVV | (VFWMUL_FUNCT6 << kRvvFunct6Shift), + RO_V_VFWMUL_VF = OP_FVF | (VFWMUL_FUNCT6 << kRvvFunct6Shift), + + VMFEQ_FUNCT6 = 0b011000, + RO_V_VMFEQ_VV = OP_FVV | (VMFEQ_FUNCT6 << kRvvFunct6Shift), + RO_V_VMFEQ_VF = OP_FVF | (VMFEQ_FUNCT6 << kRvvFunct6Shift), + + VMFNE_FUNCT6 = 0b011100, + RO_V_VMFNE_VV = OP_FVV | (VMFNE_FUNCT6 << kRvvFunct6Shift), + RO_V_VMFNE_VF = OP_FVF | (VMFNE_FUNCT6 << kRvvFunct6Shift), + + VMFLT_FUNCT6 = 0b011011, + RO_V_VMFLT_VV = OP_FVV | (VMFLT_FUNCT6 << kRvvFunct6Shift), + RO_V_VMFLT_VF = OP_FVF | (VMFLT_FUNCT6 << kRvvFunct6Shift), + + VMFLE_FUNCT6 = 0b011001, + RO_V_VMFLE_VV = OP_FVV | (VMFLE_FUNCT6 << kRvvFunct6Shift), + RO_V_VMFLE_VF = OP_FVF | (VMFLE_FUNCT6 << kRvvFunct6Shift), + + VMFGE_FUNCT6 = 0b011111, + RO_V_VMFGE_VF = OP_FVF | (VMFGE_FUNCT6 
<< kRvvFunct6Shift), + + VMFGT_FUNCT6 = 0b011101, + RO_V_VMFGT_VF = OP_FVF | (VMFGT_FUNCT6 << kRvvFunct6Shift), + + VFMAX_FUNCT6 = 0b000110, + RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift), + RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift), + + VFREDMAX_FUNCT6 = 0b0001111, + RO_V_VFREDMAX_VV = OP_FVV | (VFREDMAX_FUNCT6 << kRvvFunct6Shift), + + VFMIN_FUNCT6 = 0b000100, + RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift), + RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift), + + VFSGNJ_FUNCT6 = 0b001000, + RO_V_VFSGNJ_VV = OP_FVV | (VFSGNJ_FUNCT6 << kRvvFunct6Shift), + RO_V_VFSGNJ_VF = OP_FVF | (VFSGNJ_FUNCT6 << kRvvFunct6Shift), + + VFSGNJN_FUNCT6 = 0b001001, + RO_V_VFSGNJN_VV = OP_FVV | (VFSGNJN_FUNCT6 << kRvvFunct6Shift), + RO_V_VFSGNJN_VF = OP_FVF | (VFSGNJN_FUNCT6 << kRvvFunct6Shift), + + VFSGNJX_FUNCT6 = 0b001010, + RO_V_VFSGNJX_VV = OP_FVV | (VFSGNJX_FUNCT6 << kRvvFunct6Shift), + RO_V_VFSGNJX_VF = OP_FVF | (VFSGNJX_FUNCT6 << kRvvFunct6Shift), + + VFMADD_FUNCT6 = 0b101000, + RO_V_VFMADD_VV = OP_FVV | (VFMADD_FUNCT6 << kRvvFunct6Shift), + RO_V_VFMADD_VF = OP_FVF | (VFMADD_FUNCT6 << kRvvFunct6Shift), + + VFNMADD_FUNCT6 = 0b101001, + RO_V_VFNMADD_VV = OP_FVV | (VFNMADD_FUNCT6 << kRvvFunct6Shift), + RO_V_VFNMADD_VF = OP_FVF | (VFNMADD_FUNCT6 << kRvvFunct6Shift), + + VFMSUB_FUNCT6 = 0b101010, + RO_V_VFMSUB_VV = OP_FVV | (VFMSUB_FUNCT6 << kRvvFunct6Shift), + RO_V_VFMSUB_VF = OP_FVF | (VFMSUB_FUNCT6 << kRvvFunct6Shift), + + VFNMSUB_FUNCT6 = 0b101011, + RO_V_VFNMSUB_VV = OP_FVV | (VFNMSUB_FUNCT6 << kRvvFunct6Shift), + RO_V_VFNMSUB_VF = OP_FVF | (VFNMSUB_FUNCT6 << kRvvFunct6Shift), + + VFMACC_FUNCT6 = 0b101100, + RO_V_VFMACC_VV = OP_FVV | (VFMACC_FUNCT6 << kRvvFunct6Shift), + RO_V_VFMACC_VF = OP_FVF | (VFMACC_FUNCT6 << kRvvFunct6Shift), + + VFNMACC_FUNCT6 = 0b101101, + RO_V_VFNMACC_VV = OP_FVV | (VFNMACC_FUNCT6 << kRvvFunct6Shift), + RO_V_VFNMACC_VF = OP_FVF | (VFNMACC_FUNCT6 << kRvvFunct6Shift), + + VFMSAC_FUNCT6 = 0b101110, + RO_V_VFMSAC_VV = OP_FVV | (VFMSAC_FUNCT6 << kRvvFunct6Shift), + RO_V_VFMSAC_VF = OP_FVF | (VFMSAC_FUNCT6 << kRvvFunct6Shift), + + VFNMSAC_FUNCT6 = 0b101111, + RO_V_VFNMSAC_VV = OP_FVV | (VFNMSAC_FUNCT6 << kRvvFunct6Shift), + RO_V_VFNMSAC_VF = OP_FVF | (VFNMSAC_FUNCT6 << kRvvFunct6Shift), + + // Vector Widening Floating-Point Fused Multiply-Add Instructions + VFWMACC_FUNCT6 = 0b111100, + RO_V_VFWMACC_VV = OP_FVV | (VFWMACC_FUNCT6 << kRvvFunct6Shift), + RO_V_VFWMACC_VF = OP_FVF | (VFWMACC_FUNCT6 << kRvvFunct6Shift), + + VFWNMACC_FUNCT6 = 0b111101, + RO_V_VFWNMACC_VV = OP_FVV | (VFWNMACC_FUNCT6 << kRvvFunct6Shift), + RO_V_VFWNMACC_VF = OP_FVF | (VFWNMACC_FUNCT6 << kRvvFunct6Shift), + + VFWMSAC_FUNCT6 = 0b111110, + RO_V_VFWMSAC_VV = OP_FVV | (VFWMSAC_FUNCT6 << kRvvFunct6Shift), + RO_V_VFWMSAC_VF = OP_FVF | (VFWMSAC_FUNCT6 << kRvvFunct6Shift), + + VFWNMSAC_FUNCT6 = 0b111111, + RO_V_VFWNMSAC_VV = OP_FVV | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift), + RO_V_VFWNMSAC_VF = OP_FVF | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift), + + VNCLIP_FUNCT6 = 0b101111, + RO_V_VNCLIP_WV = OP_IVV | (VNCLIP_FUNCT6 << kRvvFunct6Shift), + RO_V_VNCLIP_WX = OP_IVX | (VNCLIP_FUNCT6 << kRvvFunct6Shift), + RO_V_VNCLIP_WI = OP_IVI | (VNCLIP_FUNCT6 << kRvvFunct6Shift), + + VNCLIPU_FUNCT6 = 0b101110, + RO_V_VNCLIPU_WV = OP_IVV | (VNCLIPU_FUNCT6 << kRvvFunct6Shift), + RO_V_VNCLIPU_WX = OP_IVX | (VNCLIPU_FUNCT6 << kRvvFunct6Shift), + RO_V_VNCLIPU_WI = OP_IVI | (VNCLIPU_FUNCT6 << kRvvFunct6Shift), +}; + +// ----- Emulated conditions. 
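+// Illustrative sketch (not part of the upstream header; the helper name
+// kExampleAddi below is ad hoc): the opcode and field constants above
+// compose full 32-bit encodings. The canonical NOP is addi x0, x0, 0, an
+// I-type instruction whose rd, rs1 and imm12 fields are all zero, which is
+// exactly the kNopByte pattern defined earlier.
+static_assert(RO_ADDI == (OP_IMM | (0b000 << kFunct3Shift)),
+              "ADDI is OP-IMM with funct3 = 000");
+static_assert(kNopByte == RO_ADDI, "NOP is ADDI with all fields left zero");
+constexpr uint32_t kExampleAddi =  // addi x5, x6, 42, for illustration only
+    RO_ADDI | (5 << kRdShift) | (6 << kRs1Shift) | (42 << kImm12Shift);
+static_assert((kExampleAddi & kITypeMask) == RO_ADDI,
+              "masking with kITypeMask recovers base opcode plus funct3");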
+// On RISC-V we use this enum to abstract from conditional branch instructions. +// The 'U' prefix is used to specify unsigned comparisons. +// Opposite conditions must be paired as odd/even numbers +// because 'NegateCondition' function flips LSB to negate condition. +enum Condition { // Any value < 0 is considered no_condition. + kNoCondition = -1, + overflow = 0, + no_overflow = 1, + Uless = 2, + Ugreater_equal = 3, + Uless_equal = 4, + Ugreater = 5, + equal = 6, + not_equal = 7, // Unordered or Not Equal. + less = 8, + greater_equal = 9, + less_equal = 10, + greater = 11, + cc_always = 12, + + // Aliases. + eq = equal, + ne = not_equal, + ge = greater_equal, + lt = less, + gt = greater, + le = less_equal, + al = cc_always, + ult = Uless, + uge = Ugreater_equal, + ule = Uless_equal, + ugt = Ugreater, +}; + +// Returns the equivalent of !cc. +// Negation of the default kNoCondition (-1) results in a non-default +// no_condition value (-2). As long as tests for no_condition check +// for condition < 0, this will work as expected. +inline Condition NegateCondition(Condition cc) { + DCHECK(cc != cc_always); + return static_cast(cc ^ 1); +} + +inline Condition NegateFpuCondition(Condition cc) { + DCHECK(cc != cc_always); + switch (cc) { + case ult: + return ge; + case ugt: + return le; + case uge: + return lt; + case ule: + return gt; + case lt: + return uge; + case gt: + return ule; + case ge: + return ult; + case le: + return ugt; + case eq: + return ne; + case ne: + return eq; + default: + return cc; + } +} + +// ----- Coprocessor conditions. +enum FPUCondition { + kNoFPUCondition = -1, + EQ = 0x02, // Ordered and Equal + NE = 0x03, // Unordered or Not Equal + LT = 0x04, // Ordered and Less Than + GE = 0x05, // Ordered and Greater Than or Equal + LE = 0x06, // Ordered and Less Than or Equal + GT = 0x07, // Ordered and Greater Than +}; + +enum CheckForInexactConversion { + kCheckForInexactConversion, + kDontCheckForInexactConversion +}; + +enum class MaxMinKind : int { kMin = 0, kMax = 1 }; + +// ---------------------------------------------------------------------------- +// RISCV flags + +enum ControlStatusReg { + csr_fflags = 0x001, // Floating-Point Accrued Exceptions (RW) + csr_frm = 0x002, // Floating-Point Dynamic Rounding Mode (RW) + csr_fcsr = 0x003, // Floating-Point Control and Status Register (RW) + csr_cycle = 0xc00, // Cycle counter for RDCYCLE instruction (RO) + csr_time = 0xc01, // Timer for RDTIME instruction (RO) + csr_instret = 0xc02, // Insns-retired counter for RDINSTRET instruction (RO) + csr_cycleh = 0xc80, // Upper 32 bits of cycle, RV32I only (RO) + csr_timeh = 0xc81, // Upper 32 bits of time, RV32I only (RO) + csr_instreth = 0xc82 // Upper 32 bits of instret, RV32I only (RO) +}; + +enum FFlagsMask { + kInvalidOperation = 0b10000, // NV: Invalid + kDivideByZero = 0b1000, // DZ: Divide by Zero + kOverflow = 0b100, // OF: Overflow + kUnderflow = 0b10, // UF: Underflow + kInexact = 0b1 // NX: Inexact +}; + +enum RoundingMode { + RNE = 0b000, // Round to Nearest, ties to Even + RTZ = 0b001, // Round towards Zero + RDN = 0b010, // Round Down (towards -infinity) + RUP = 0b011, // Round Up (towards +infinity) + RMM = 0b100, // Round to Nearest, tiest to Max Magnitude + DYN = 0b111 // In instruction's rm field, selects dynamic rounding mode; + // In Rounding Mode register, Invalid +}; + +enum MemoryOdering { + PSI = 0b1000, // PI or SI + PSO = 0b0100, // PO or SO + PSR = 0b0010, // PR or SR + PSW = 0b0001, // PW or SW + PSIORW = PSI | PSO | PSR | PSW +}; + +const int 
kFloat32ExponentBias = 127; +const int kFloat32MantissaBits = 23; +const int kFloat32ExponentBits = 8; +const int kFloat64ExponentBias = 1023; +const int kFloat64MantissaBits = 52; +const int kFloat64ExponentBits = 11; + +enum FClassFlag { + kNegativeInfinity = 1, + kNegativeNormalNumber = 1 << 1, + kNegativeSubnormalNumber = 1 << 2, + kNegativeZero = 1 << 3, + kPositiveZero = 1 << 4, + kPositiveSubnormalNumber = 1 << 5, + kPositiveNormalNumber = 1 << 6, + kPositiveInfinity = 1 << 7, + kSignalingNaN = 1 << 8, + kQuietNaN = 1 << 9 +}; + +#define RVV_SEW(V) \ + V(E8) \ + V(E16) \ + V(E32) \ + V(E64) + +#define DEFINE_FLAG(name) name, +enum VSew { + RVV_SEW(DEFINE_FLAG) +#undef DEFINE_FLAG +}; + +#define RVV_LMUL(V) \ + V(m1) \ + V(m2) \ + V(m4) \ + V(m8) \ + V(RESERVERD) \ + V(mf8) \ + V(mf4) \ + V(mf2) + +enum Vlmul { +#define DEFINE_FLAG(name) name, + RVV_LMUL(DEFINE_FLAG) +#undef DEFINE_FLAG +}; + +enum TailAgnosticType { + ta = 0x1, // Tail agnostic + tu = 0x0, // Tail undisturbed +}; + +enum MaskAgnosticType { + ma = 0x1, // Mask agnostic + mu = 0x0, // Mask undisturbed +}; +enum MaskType { + Mask = 0x0, // use the mask + NoMask = 0x1, +}; + +// ----------------------------------------------------------------------------- +// Hints. + +// Branch hints are not used on RISC-V. They are defined so that they can +// appear in shared function signatures, but will be ignored in RISC-V +// implementations. +enum Hint { no_hint = 0 }; + +inline Hint NegateHint(Hint hint) { return no_hint; } + +// ----------------------------------------------------------------------------- +// Specific instructions, constants, and masks. +// These constants are declared in assembler-riscv64.cc, as they use named +// registers and other constants. + +// An Illegal instruction +const Instr kIllegalInstr = 0; // All other bits are 0s (i.e., ecall) +// An ECALL instruction, used for redirected real time call +const Instr rtCallRedirInstr = SYSTEM; // All other bits are 0s (i.e., ecall) +// An EBreak instruction, used for debugging and semi-hosting +const Instr kBreakInstr = SYSTEM | 1 << kImm12Shift; // ebreak + +constexpr uint8_t kInstrSize = 4; +constexpr uint8_t kShortInstrSize = 2; +constexpr uint8_t kInstrSizeLog2 = 2; + +class InstructionBase { + public: + enum { + // On RISC-V, PC cannot actually be directly accessed. We behave as if PC + // was always the value of the current instruction being executed. + kPCReadOffset = 0 + }; + + // Instruction type. + enum Type { + kRType, + kR4Type, // Special R4 for Q extension + kIType, + kSType, + kBType, + kUType, + kJType, + // C extension + kCRType, + kCIType, + kCSSType, + kCIWType, + kCLType, + kCSType, + kCAType, + kCBType, + kCJType, + // V extension + kVType, + kVLType, + kVSType, + kVAMOType, + kVIVVType, + kVFVVType, + kVMVVType, + kVIVIType, + kVIVXType, + kVFVFType, + kVMVXType, + kVSETType, + kUnsupported = -1 + }; + + inline bool IsIllegalInstruction() const { + uint16_t FirstHalfWord = *reinterpret_cast(this); + return FirstHalfWord == 0; + } + + inline bool IsShortInstruction() const { + uint8_t FirstByte = *reinterpret_cast(this); + return (FirstByte & 0x03) <= C2; + } + + inline uint8_t InstructionSize() const { + return (FLAG_riscv_c_extension && this->IsShortInstruction()) + ? kShortInstrSize + : kInstrSize; + } + + // Get the raw instruction bits. 
+  inline Instr InstructionBits() const {
+    if (FLAG_riscv_c_extension && this->IsShortInstruction()) {
+      return 0x0000FFFF & (*reinterpret_cast<const Instr*>(this));
+    }
+    return *reinterpret_cast<const Instr*>(this);
+  }
+
+  // Set the raw instruction bits to value.
+  inline void SetInstructionBits(Instr value) {
+    *reinterpret_cast<Instr*>(this) = value;
+  }
+
+  // Read one particular bit out of the instruction bits.
+  inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+  // Read a bit field out of the instruction bits.
+  inline int Bits(int hi, int lo) const {
+    return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
+  }
+
+  // Accessors for the different named fields used in the RISC-V encoding.
+  inline Opcode BaseOpcodeValue() const {
+    return static_cast<Opcode>(
+        Bits(kBaseOpcodeShift + kBaseOpcodeBits - 1, kBaseOpcodeShift));
+  }
+
+  // Return the fields at their original place in the instruction encoding.
+  inline Opcode BaseOpcodeFieldRaw() const {
+    return static_cast<Opcode>(InstructionBits() & kBaseOpcodeMask);
+  }
+
+  // Safe to call within R-type instructions
+  inline int Funct7FieldRaw() const { return InstructionBits() & kFunct7Mask; }
+
+  // Safe to call within R-, I-, S-, or B-type instructions
+  inline int Funct3FieldRaw() const { return InstructionBits() & kFunct3Mask; }
+
+  // Safe to call within R-, I-, S-, or B-type instructions
+  inline int Rs1FieldRawNoAssert() const {
+    return InstructionBits() & kRs1FieldMask;
+  }
+
+  // Safe to call within R-, S-, or B-type instructions
+  inline int Rs2FieldRawNoAssert() const {
+    return InstructionBits() & kRs2FieldMask;
+  }
+
+  // Safe to call within R4-type instructions
+  inline int Rs3FieldRawNoAssert() const {
+    return InstructionBits() & kRs3FieldMask;
+  }
+
+  inline int32_t ITypeBits() const { return InstructionBits() & kITypeMask; }
+
+  inline int32_t InstructionOpcodeType() const {
+    if (IsShortInstruction()) {
+      return InstructionBits() & kRvcOpcodeMask;
+    } else {
+      return InstructionBits() & kBaseOpcodeMask;
+    }
+  }
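+  // A minimal sketch (illustrative, not part of the upstream header) of how
+  // the field masks above relate to real encodings: for the NOP pattern
+  // kNopByte (addi x0, x0, 0), every named field other than the base opcode
+  // and funct3 is zero.
+  static_assert((kNopByte & kBaseOpcodeMask) == OP_IMM,
+                "bits [6:0] of a NOP hold the OP-IMM base opcode");
+  static_assert(((kNopByte & kFunct3Mask) >> kFunct3Shift) == 0b000,
+                "bits [14:12] of a NOP hold funct3 = 000 (ADDI)");
+  static_assert(((kNopByte & kRdFieldMask) >> kRdShift) == 0,
+                "bits [11:7] of a NOP name rd = x0");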
+  // Get the encoding type of the instruction.
+  Type InstructionType() const;
+
+ protected:
+  InstructionBase() {}
+};
+
+template <class T>
+class InstructionGetters : public T {
+ public:
+  inline int BaseOpcode() const {
+    return this->InstructionBits() & kBaseOpcodeMask;
+  }
+
+  inline int RvcOpcode() const {
+    DCHECK(this->IsShortInstruction());
+    return this->InstructionBits() & kRvcOpcodeMask;
+  }
+
+  inline int Rs1Value() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRType ||
+           this->InstructionType() == InstructionBase::kR4Type ||
+           this->InstructionType() == InstructionBase::kIType ||
+           this->InstructionType() == InstructionBase::kSType ||
+           this->InstructionType() == InstructionBase::kBType ||
+           this->InstructionType() == InstructionBase::kIType ||
+           this->InstructionType() == InstructionBase::kVType);
+    return this->Bits(kRs1Shift + kRs1Bits - 1, kRs1Shift);
+  }
+
+  inline int Rs2Value() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRType ||
+           this->InstructionType() == InstructionBase::kR4Type ||
+           this->InstructionType() == InstructionBase::kSType ||
+           this->InstructionType() == InstructionBase::kBType ||
+           this->InstructionType() == InstructionBase::kIType ||
+           this->InstructionType() == InstructionBase::kVType);
+    return this->Bits(kRs2Shift + kRs2Bits - 1, kRs2Shift);
+  }
+
+  inline int Rs3Value() const {
+    DCHECK(this->InstructionType() == InstructionBase::kR4Type);
+    return this->Bits(kRs3Shift + kRs3Bits - 1, kRs3Shift);
+  }
+
+  inline int Vs1Value() const {
+    DCHECK(this->InstructionType() == InstructionBase::kVType ||
+           this->InstructionType() == InstructionBase::kIType ||
+           this->InstructionType() == InstructionBase::kSType);
+    return this->Bits(kVs1Shift + kVs1Bits - 1, kVs1Shift);
+  }
+
+  inline int Vs2Value() const {
+    DCHECK(this->InstructionType() == InstructionBase::kVType ||
+           this->InstructionType() == InstructionBase::kIType ||
+           this->InstructionType() == InstructionBase::kSType);
+    return this->Bits(kVs2Shift + kVs2Bits - 1, kVs2Shift);
+  }
+
+  inline int VdValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kVType ||
+           this->InstructionType() == InstructionBase::kIType ||
+           this->InstructionType() == InstructionBase::kSType);
+    return this->Bits(kVdShift + kVdBits - 1, kVdShift);
+  }
+
+  inline int RdValue() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRType ||
+           this->InstructionType() == InstructionBase::kR4Type ||
+           this->InstructionType() == InstructionBase::kIType ||
+           this->InstructionType() == InstructionBase::kSType ||
+           this->InstructionType() == InstructionBase::kUType ||
+           this->InstructionType() == InstructionBase::kJType ||
+           this->InstructionType() == InstructionBase::kVType);
+    return this->Bits(kRdShift + kRdBits - 1, kRdShift);
+  }
+
+  inline int RvcRdValue() const {
+    DCHECK(this->IsShortInstruction());
+    return this->Bits(kRvcRdShift + kRvcRdBits - 1, kRvcRdShift);
+  }
+
+  inline int RvcRs1Value() const { return this->RvcRdValue(); }
+
+  inline int RvcRs2Value() const {
+    DCHECK(this->IsShortInstruction());
+    return this->Bits(kRvcRs2Shift + kRvcRs2Bits - 1, kRvcRs2Shift);
+  }
+
+  inline int RvcRs1sValue() const {
+    DCHECK(this->IsShortInstruction());
+    return 0b1000 + this->Bits(kRvcRs1sShift + kRvcRs1sBits - 1, kRvcRs1sShift);
+  }
+
+  inline int RvcRs2sValue() const {
+    DCHECK(this->IsShortInstruction());
+    return 0b1000 + this->Bits(kRvcRs2sShift + kRvcRs2sBits - 1, kRvcRs2sShift);
+  }
+
+  inline int Funct7Value() const {
+    DCHECK(this->InstructionType() == InstructionBase::kRType);
+    return this->Bits(kFunct7Shift +
kFunct7Bits - 1, kFunct7Shift); + } + + inline int Funct3Value() const { + DCHECK(this->InstructionType() == InstructionBase::kRType || + this->InstructionType() == InstructionBase::kIType || + this->InstructionType() == InstructionBase::kSType || + this->InstructionType() == InstructionBase::kBType); + return this->Bits(kFunct3Shift + kFunct3Bits - 1, kFunct3Shift); + } + + inline int Funct5Value() const { + DCHECK(this->InstructionType() == InstructionBase::kRType && + this->BaseOpcode() == OP_FP); + return this->Bits(kFunct5Shift + kFunct5Bits - 1, kFunct5Shift); + } + + inline int RvcFunct6Value() const { + DCHECK(this->IsShortInstruction()); + return this->Bits(kRvcFunct6Shift + kRvcFunct6Bits - 1, kRvcFunct6Shift); + } + + inline int RvcFunct4Value() const { + DCHECK(this->IsShortInstruction()); + return this->Bits(kRvcFunct4Shift + kRvcFunct4Bits - 1, kRvcFunct4Shift); + } + + inline int RvcFunct3Value() const { + DCHECK(this->IsShortInstruction()); + return this->Bits(kRvcFunct3Shift + kRvcFunct3Bits - 1, kRvcFunct3Shift); + } + + inline int RvcFunct2Value() const { + DCHECK(this->IsShortInstruction()); + return this->Bits(kRvcFunct2Shift + kRvcFunct2Bits - 1, kRvcFunct2Shift); + } + + inline int RvcFunct2BValue() const { + DCHECK(this->IsShortInstruction()); + return this->Bits(kRvcFunct2BShift + kRvcFunct2Bits - 1, kRvcFunct2BShift); + } + + inline int CsrValue() const { + DCHECK(this->InstructionType() == InstructionBase::kIType && + this->BaseOpcode() == SYSTEM); + return (this->Bits(kCsrShift + kCsrBits - 1, kCsrShift)); + } + + inline int RoundMode() const { + DCHECK((this->InstructionType() == InstructionBase::kRType || + this->InstructionType() == InstructionBase::kR4Type) && + this->BaseOpcode() == OP_FP); + return this->Bits(kFunct3Shift + kFunct3Bits - 1, kFunct3Shift); + } + + inline int MemoryOrder(bool is_pred) const { + DCHECK((this->InstructionType() == InstructionBase::kIType && + this->BaseOpcode() == MISC_MEM)); + if (is_pred) { + return this->Bits(kPredOrderShift + kMemOrderBits - 1, kPredOrderShift); + } else { + return this->Bits(kSuccOrderShift + kMemOrderBits - 1, kSuccOrderShift); + } + } + + inline int Imm12Value() const { + DCHECK(this->InstructionType() == InstructionBase::kIType); + int Value = this->Bits(kImm12Shift + kImm12Bits - 1, kImm12Shift); + return Value << 20 >> 20; + } + + inline int32_t Imm12SExtValue() const { + int32_t Value = this->Imm12Value() << 20 >> 20; + return Value; + } + + inline int BranchOffset() const { + DCHECK(this->InstructionType() == InstructionBase::kBType); + // | imm[12|10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode | + // 31 25 11 7 + uint32_t Bits = this->InstructionBits(); + int16_t imm13 = ((Bits & 0xf00) >> 7) | ((Bits & 0x7e000000) >> 20) | + ((Bits & 0x80) << 4) | ((Bits & 0x80000000) >> 19); + return imm13 << 19 >> 19; + } + + inline int StoreOffset() const { + DCHECK(this->InstructionType() == InstructionBase::kSType); + // | imm[11:5] | rs2 | rs1 | funct3 | imm[4:0] | opcode | + // 31 25 11 7 + uint32_t Bits = this->InstructionBits(); + int16_t imm12 = ((Bits & 0xf80) >> 7) | ((Bits & 0xfe000000) >> 20); + return imm12 << 20 >> 20; + } + + inline int Imm20UValue() const { + DCHECK(this->InstructionType() == InstructionBase::kUType); + // | imm[31:12] | rd | opcode | + // 31 12 + int32_t Bits = this->InstructionBits(); + return Bits >> 12; + } + + inline int Imm20JValue() const { + DCHECK(this->InstructionType() == InstructionBase::kJType); + // | imm[20|10:1|11|19:12] | rd | opcode | + // 31 12 + uint32_t 
Bits = this->InstructionBits(); + int32_t imm20 = ((Bits & 0x7fe00000) >> 20) | ((Bits & 0x100000) >> 9) | + (Bits & 0xff000) | ((Bits & 0x80000000) >> 11); + return imm20 << 11 >> 11; + } + + inline bool IsArithShift() const { + // Valid only for right shift operations + DCHECK((this->BaseOpcode() == OP || this->BaseOpcode() == OP_32 || + this->BaseOpcode() == OP_IMM || this->BaseOpcode() == OP_IMM_32) && + this->Funct3Value() == 0b101); + return this->InstructionBits() & 0x40000000; + } + + inline int Shamt() const { + // Valid only for shift instructions (SLLI, SRLI, SRAI) + DCHECK((this->InstructionBits() & kBaseOpcodeMask) == OP_IMM && + (this->Funct3Value() == 0b001 || this->Funct3Value() == 0b101)); + // | 0A0000 | shamt | rs1 | funct3 | rd | opcode | + // 31 25 20 + return this->Bits(kImm12Shift + 5, kImm12Shift); + } + + inline int Shamt32() const { + // Valid only for shift instructions (SLLIW, SRLIW, SRAIW) + DCHECK((this->InstructionBits() & kBaseOpcodeMask) == OP_IMM_32 && + (this->Funct3Value() == 0b001 || this->Funct3Value() == 0b101)); + // | 0A00000 | shamt | rs1 | funct3 | rd | opcode | + // 31 24 20 + return this->Bits(kImm12Shift + 4, kImm12Shift); + } + + inline int RvcImm6Value() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | imm[5] | rs1/rd | imm[4:0] | opcode | + // 15 12 6 2 + uint32_t Bits = this->InstructionBits(); + int32_t imm6 = ((Bits & 0x1000) >> 7) | ((Bits & 0x7c) >> 2); + return imm6 << 26 >> 26; + } + + inline int RvcImm6Addi16spValue() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | nzimm[9] | 2 | nzimm[4|6|8:7|5] | opcode | + // 15 12 6 2 + uint32_t Bits = this->InstructionBits(); + int32_t imm10 = ((Bits & 0x1000) >> 3) | ((Bits & 0x40) >> 2) | + ((Bits & 0x20) << 1) | ((Bits & 0x18) << 4) | + ((Bits & 0x4) << 3); + DCHECK_NE(imm10, 0); + return imm10 << 22 >> 22; + } + + inline int RvcImm8Addi4spnValue() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | nzimm[11] | rd' | opcode | + // 15 13 5 2 + uint32_t Bits = this->InstructionBits(); + int32_t uimm10 = ((Bits & 0x20) >> 2) | ((Bits & 0x40) >> 4) | + ((Bits & 0x780) >> 1) | ((Bits & 0x1800) >> 7); + DCHECK_NE(uimm10, 0); + return uimm10; + } + + inline int RvcShamt6() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | nzuimm[5] | rs1/rd | nzuimm[4:0] | opcode | + // 15 12 6 2 + int32_t imm6 = this->RvcImm6Value(); + return imm6 & 0x3f; + } + + inline int RvcImm6LwspValue() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | uimm[5] | rs1 | uimm[4:2|7:6] | opcode | + // 15 12 6 2 + uint32_t Bits = this->InstructionBits(); + int32_t imm8 = + ((Bits & 0x1000) >> 7) | ((Bits & 0x70) >> 2) | ((Bits & 0xc) << 4); + return imm8; + } + + inline int RvcImm6LdspValue() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | uimm[5] | rs1 | uimm[4:3|8:6] | opcode | + // 15 12 6 2 + uint32_t Bits = this->InstructionBits(); + int32_t imm9 = + ((Bits & 0x1000) >> 7) | ((Bits & 0x60) >> 2) | ((Bits & 0x1c) << 4); + return imm9; + } + + inline int RvcImm6SwspValue() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | uimm[5:2|7:6] | rs2 | opcode | + // 15 12 7 + uint32_t Bits = this->InstructionBits(); + int32_t imm8 = ((Bits & 0x1e00) >> 7) | ((Bits & 0x180) >> 1); + return imm8; + } + + inline int RvcImm6SdspValue() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | uimm[5:3|8:6] | rs2 | opcode | + // 15 12 7 + uint32_t Bits = this->InstructionBits(); + int32_t imm9 = ((Bits & 0x1c00) >> 7) | ((Bits & 0x380) >> 1); + return 
imm9; + } + + inline int RvcImm5WValue() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | imm[5:3] | rs1 | imm[2|6] | rd | opcode | + // 15 12 10 6 4 2 + uint32_t Bits = this->InstructionBits(); + int32_t imm7 = + ((Bits & 0x1c00) >> 7) | ((Bits & 0x40) >> 4) | ((Bits & 0x20) << 1); + return imm7; + } + + inline int RvcImm5DValue() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | imm[5:3] | rs1 | imm[7:6] | rd | opcode | + // 15 12 10 6 4 2 + uint32_t Bits = this->InstructionBits(); + int32_t imm8 = ((Bits & 0x1c00) >> 7) | ((Bits & 0x60) << 1); + return imm8; + } + + inline int RvcImm11CJValue() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | [11|4|9:8|10|6|7|3:1|5] | opcode | + // 15 12 2 + uint32_t Bits = this->InstructionBits(); + int32_t imm12 = ((Bits & 0x4) << 3) | ((Bits & 0x38) >> 2) | + ((Bits & 0x40) << 1) | ((Bits & 0x80) >> 1) | + ((Bits & 0x100) << 2) | ((Bits & 0x600) >> 1) | + ((Bits & 0x800) >> 7) | ((Bits & 0x1000) >> 1); + return imm12 << 20 >> 20; + } + + inline int RvcImm8BValue() const { + DCHECK(this->IsShortInstruction()); + // | funct3 | imm[8|4:3] | rs1` | imm[7:6|2:1|5] | opcode | + // 15 12 10 7 2 + uint32_t Bits = this->InstructionBits(); + int32_t imm9 = ((Bits & 0x4) << 3) | ((Bits & 0x18) >> 2) | + ((Bits & 0x60) << 1) | ((Bits & 0xc00) >> 7) | + ((Bits & 0x1000) >> 4); + return imm9 << 23 >> 23; + } + + inline int vl_vs_width() { + int width = 0; + if ((this->InstructionBits() & kBaseOpcodeMask) != LOAD_FP && + (this->InstructionBits() & kBaseOpcodeMask) != STORE_FP) + return -1; + switch (this->InstructionBits() & (kRvvWidthMask | kRvvMewMask)) { + case 0x0: + width = 8; + break; + case 0x00005000: + width = 16; + break; + case 0x00006000: + width = 32; + break; + case 0x00007000: + width = 64; + break; + case 0x10000000: + width = 128; + break; + case 0x10005000: + width = 256; + break; + case 0x10006000: + width = 512; + break; + case 0x10007000: + width = 1024; + break; + default: + width = -1; + break; + } + return width; + } + + inline uint32_t Rvvzimm() const { + if ((this->InstructionBits() & + (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) == RO_V_VSETVLI) { + uint32_t Bits = this->InstructionBits(); + uint32_t zimm = Bits & kRvvZimmMask; + return zimm >> kRvvZimmShift; + } else { + DCHECK_EQ(this->InstructionBits() & + (kBaseOpcodeMask | kFunct3Mask | 0xC0000000), + RO_V_VSETIVLI); + uint32_t Bits = this->InstructionBits(); + uint32_t zimm = Bits & kRvvZimmMask; + return (zimm >> kRvvZimmShift) & 0x3FF; + } + } + + inline uint32_t Rvvuimm() const { + DCHECK_EQ( + this->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask | 0xC0000000), + RO_V_VSETIVLI); + uint32_t Bits = this->InstructionBits(); + uint32_t uimm = Bits & kRvvUimmMask; + return uimm >> kRvvUimmShift; + } + + inline uint32_t RvvVsew() const { + uint32_t zimm = this->Rvvzimm(); + uint32_t vsew = (zimm >> 3) & 0x7; + return vsew; + } + + inline uint32_t RvvVlmul() const { + uint32_t zimm = this->Rvvzimm(); + uint32_t vlmul = zimm & 0x7; + return vlmul; + } + + inline uint8_t RvvVM() const { + DCHECK(this->InstructionType() == InstructionBase::kVType || + this->InstructionType() == InstructionBase::kIType || + this->InstructionType() == InstructionBase::kSType); + return this->Bits(kRvvVmShift + kRvvVmBits - 1, kRvvVmShift); + } + + inline const char* RvvSEW() const { + uint32_t vsew = this->RvvVsew(); + switch (vsew) { +#define CAST_VSEW(name) \ + case name: \ + return #name; + RVV_SEW(CAST_VSEW) + default: + return "unknown"; +#undef CAST_VSEW + } 
+ } + + inline const char* RvvLMUL() const { + uint32_t vlmul = this->RvvVlmul(); + switch (vlmul) { +#define CAST_VLMUL(name) \ + case name: \ + return #name; + RVV_LMUL(CAST_VLMUL) + default: + return "unknown"; +#undef CAST_VLMUL + } + } + +#define sext(x, len) (((int32_t)(x) << (32 - len)) >> (32 - len)) +#define zext(x, len) (((uint32_t)(x) << (32 - len)) >> (32 - len)) + + inline int32_t RvvSimm5() const { + DCHECK(this->InstructionType() == InstructionBase::kVType); + return sext(this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift), + kRvvImm5Bits); + } + + inline uint32_t RvvUimm5() const { + DCHECK(this->InstructionType() == InstructionBase::kVType); + uint32_t imm = this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift); + return zext(imm, kRvvImm5Bits); + } +#undef sext +#undef zext + inline bool AqValue() const { return this->Bits(kAqShift, kAqShift); } + + inline bool RlValue() const { return this->Bits(kRlShift, kRlShift); } + + // Say if the instruction is a break or a trap. + bool IsTrap() const; +}; + +class Instruction : public InstructionGetters { + public: + // Instructions are read of out a code stream. The only way to get a + // reference to an instruction is to convert a pointer. There is no way + // to allocate or create instances of class Instruction. + // Use the At(pc) function to create references to Instruction. + static Instruction* At(byte* pc) { + return reinterpret_cast(pc); + } + + private: + // We need to prevent the creation of instances of class Instruction. + DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction); +}; + +// ----------------------------------------------------------------------------- +// RISC-V assembly various constants. + +// C/C++ argument slots size. +const int kCArgSlotCount = 0; + +// TODO(plind): below should be based on kSystemPointerSize +// TODO(plind): find all usages and remove the needless instructions for n64. +const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize * 2; + +const int kInvalidStackOffset = -1; +const int kBranchReturnOffset = 2 * kInstrSize; + +static const int kNegOffset = 0x00008000; + +// ----------------------------------------------------------------------------- +// Instructions. + +template +bool InstructionGetters
<T>
::IsTrap() const { + return (this->InstructionBits() == kBreakInstr); +} + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_RISCV64_CONSTANTS_RISCV64_H_ diff --git a/deps/v8/src/codegen/riscv64/cpu-riscv64.cc b/deps/v8/src/codegen/riscv64/cpu-riscv64.cc new file mode 100644 index 00000000000000..aad09378f996aa --- /dev/null +++ b/deps/v8/src/codegen/riscv64/cpu-riscv64.cc @@ -0,0 +1,32 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// CPU specific code for arm independent of OS goes here. + +#include +#include + +#if V8_TARGET_ARCH_RISCV64 + +#include "src/codegen/cpu-features.h" + +namespace v8 { +namespace internal { + +void CpuFeatures::FlushICache(void* start, size_t size) { +#if !defined(USE_SIMULATOR) + char* end = reinterpret_cast(start) + size; + // The definition of this syscall is + // SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, + // uintptr_t, end, uintptr_t, flags) + // The flag here is set to be SYS_RISCV_FLUSH_ICACHE_LOCAL, which is + // defined as 1 in the Linux kernel. + syscall(SYS_riscv_flush_icache, start, end, 1); +#endif // !USE_SIMULATOR. +} + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_RISCV64 diff --git a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h new file mode 100644 index 00000000000000..d98726915347fa --- /dev/null +++ b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h @@ -0,0 +1,327 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_ +#define V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_ + +#if V8_TARGET_ARCH_RISCV64 + +#include "src/base/template-utils.h" +#include "src/codegen/interface-descriptors.h" +#include "src/execution/frames.h" + +namespace v8 { +namespace internal { + +constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() { + auto registers = RegisterArray(a0, a1, a2, a3, a4); + STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams); + return registers; +} + +#if DEBUG +template +void StaticCallInterfaceDescriptor:: + VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) { + RegList allocatable_regs = data->allocatable_registers(); + if (argc >= 1) DCHECK(allocatable_regs.has(a0)); + if (argc >= 2) DCHECK(allocatable_regs.has(a1)); + if (argc >= 3) DCHECK(allocatable_regs.has(a2)); + if (argc >= 4) DCHECK(allocatable_regs.has(a3)); + if (argc >= 5) DCHECK(allocatable_regs.has(a4)); + if (argc >= 6) DCHECK(allocatable_regs.has(a5)); + if (argc >= 7) DCHECK(allocatable_regs.has(a6)); + if (argc >= 8) DCHECK(allocatable_regs.has(a7)); + // Additional arguments are passed on the stack. +} +#endif // DEBUG + +// static +constexpr auto WriteBarrierDescriptor::registers() { + // TODO(Yuxiang): Remove a7 which is just there for padding. 
+ return RegisterArray(a1, a5, a4, a2, a0, a3, kContextRegister, a7); +} + +// static +constexpr Register LoadDescriptor::ReceiverRegister() { return a1; } +// static +constexpr Register LoadDescriptor::NameRegister() { return a2; } +// static +constexpr Register LoadDescriptor::SlotRegister() { return a0; } + +// static +constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; } + +// static +constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() { + return a1; +} +// static +constexpr Register KeyedLoadBaselineDescriptor::NameRegister() { + return kInterpreterAccumulatorRegister; +} +// static +constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return a2; } + +// static +constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() { + return a3; +} + +// static +constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() { + return kInterpreterAccumulatorRegister; +} +// static +constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return a1; } +// static +constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return a2; } + +// static +constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() { + return a3; +} + +// static +constexpr Register +LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() { + return a4; +} + +// static +constexpr Register StoreDescriptor::ReceiverRegister() { return a1; } +// static +constexpr Register StoreDescriptor::NameRegister() { return a2; } +// static +constexpr Register StoreDescriptor::ValueRegister() { return a0; } +// static +constexpr Register StoreDescriptor::SlotRegister() { return a4; } + +// static +constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; } + +// static +constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; } + +// static +constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; } +// static +constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; } + +// static +constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; } +// static +constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; } + +// static +constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { + return a2; +} +// static +constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; } + +// static +// static +constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; } + +// static +constexpr auto TypeofDescriptor::registers() { return RegisterArray(a0); } + +// static +constexpr auto CallTrampolineDescriptor::registers() { + // a1: target + // a0: number of arguments + return RegisterArray(a1, a0); +} + +// static +constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() { + // a1 : the source + // a0 : the excluded property count + return RegisterArray(a1, a0); +} + +// static +constexpr auto +CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() { + // a1 : the source + // a0 : the excluded property count + // a2 : the excluded property base + return RegisterArray(a1, a0, a2); +} + +// static +constexpr auto CallVarargsDescriptor::registers() { + // a0 : number of arguments (on the stack) + // a1 : the target to call + // a4 : arguments list length (untagged) + // a2 : arguments list (FixedArray) + return RegisterArray(a1, a0, a4, a2); +} + +// static +constexpr auto CallForwardVarargsDescriptor::registers() { + // a1: target + // a0: number of arguments + // a2: start 
index (to supported rest parameters) + return RegisterArray(a1, a0, a2); +} + +// static +constexpr auto CallFunctionTemplateDescriptor::registers() { + // a1 : function template info + // a0 : number of arguments (on the stack) + return RegisterArray(a1, a0); +} + +// static +constexpr auto CallWithSpreadDescriptor::registers() { + // a0 : number of arguments (on the stack) + // a1 : the target to call + // a2 : the object to spread + return RegisterArray(a1, a0, a2); +} + +// static +constexpr auto CallWithArrayLikeDescriptor::registers() { + // a1 : the target to call + // a2 : the arguments list + return RegisterArray(a1, a2); +} + +// static +constexpr auto ConstructVarargsDescriptor::registers() { + // a0 : number of arguments (on the stack) + // a1 : the target to call + // a3 : the new target + // a4 : arguments list length (untagged) + // a2 : arguments list (FixedArray) + return RegisterArray(a1, a3, a0, a4, a2); +} + +// static +constexpr auto ConstructForwardVarargsDescriptor::registers() { + // a3: new target + // a1: target + // a0: number of arguments + // a2: start index (to supported rest parameters) + return RegisterArray(a1, a3, a0, a2); +} + +// static +constexpr auto ConstructWithSpreadDescriptor::registers() { + // a0 : number of arguments (on the stack) + // a1 : the target to call + // a3 : the new target + // a2 : the object to spread + return RegisterArray(a1, a3, a0, a2); +} + +// static +constexpr auto ConstructWithArrayLikeDescriptor::registers() { + // a1 : the target to call + // a3 : the new target + // a2 : the arguments list + return RegisterArray(a1, a3, a2); +} + +// static +constexpr auto ConstructStubDescriptor::registers() { + // a3: new target + // a1: target + // a0: number of arguments + // a2: allocation site or undefined + return RegisterArray(a1, a3, a0, a2); +} + +// static +constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); } + +// static +constexpr auto CompareDescriptor::registers() { + // a1: left operand + // a0: right operand + return RegisterArray(a1, a0); +} + +// static +constexpr auto Compare_BaselineDescriptor::registers() { + // a1: left operand + // a0: right operand + // a2: feedback slot + return RegisterArray(a1, a0, a2); +} + +// static +constexpr auto BinaryOpDescriptor::registers() { + // a1: left operand + // a0: right operand + return RegisterArray(a1, a0); +} + +// static +constexpr auto BinaryOp_BaselineDescriptor::registers() { + // a1: left operand + // a0: right operand + // a2: feedback slot + return RegisterArray(a1, a0, a2); +} + +// static +constexpr auto BinarySmiOp_BaselineDescriptor::registers() { + // a0: left operand + // a1: right operand + // a2: feedback slot + return RegisterArray(a0, a1, a2); +} + +// static +constexpr auto ApiCallbackDescriptor::registers() { + return RegisterArray(a1, // kApiFunctionAddress + a2, // kArgc + a3, // kCallData + a0); // kHolder +} + +// static +constexpr auto InterpreterDispatchDescriptor::registers() { + return RegisterArray( + kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister, + kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister); +} + +// static +constexpr auto InterpreterPushArgsThenCallDescriptor::registers() { + return RegisterArray(a0, // argument count + a2, // address of first argument + a1); // the target callable to be call +} + +// static +constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() { + return RegisterArray( + a0, // argument count + a4, // address of the first argument + a1, // 
constructor to call + a3, // new target + a2); // allocation site feedback if available, undefined otherwise +} + +// static +constexpr auto ResumeGeneratorDescriptor::registers() { + return RegisterArray(a0, // the value to pass to the generator + a1); // the JSGeneratorObject to resume +} + +// static +constexpr auto RunMicrotasksEntryDescriptor::registers() { + return RegisterArray(a0, a1); +} + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_RISCV64 + +#endif // V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_ diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc new file mode 100644 index 00000000000000..33816db57fb29f --- /dev/null +++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc @@ -0,0 +1,5170 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include // For LONG_MIN, LONG_MAX. + +#if V8_TARGET_ARCH_RISCV64 + +#include "src/base/bits.h" +#include "src/base/division-by-constant.h" +#include "src/codegen/assembler-inl.h" +#include "src/codegen/callable.h" +#include "src/codegen/code-factory.h" +#include "src/codegen/external-reference-table.h" +#include "src/codegen/interface-descriptors-inl.h" +#include "src/codegen/macro-assembler.h" +#include "src/codegen/register-configuration.h" +#include "src/debug/debug.h" +#include "src/deoptimizer/deoptimizer.h" +#include "src/execution/frames-inl.h" +#include "src/heap/memory-chunk.h" +#include "src/init/bootstrapper.h" +#include "src/logging/counters.h" +#include "src/objects/heap-number.h" +#include "src/runtime/runtime.h" +#include "src/snapshot/snapshot.h" +#include "src/wasm/wasm-code-manager.h" + +// Satisfy cpplint check, but don't include platform-specific header. It is +// included recursively via macro-assembler.h. 
+#if 0 +#include "src/codegen/riscv64/macro-assembler-riscv64.h" +#endif + +namespace v8 { +namespace internal { + +static inline bool IsZero(const Operand& rt) { + if (rt.is_reg()) { + return rt.rm() == zero_reg; + } else { + return rt.immediate() == 0; + } +} + +int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, + Register exclusion1, + Register exclusion2, + Register exclusion3) const { + int bytes = 0; + + RegList exclusions = {exclusion1, exclusion2, exclusion3}; + RegList list = kJSCallerSaved - exclusions; + bytes += list.Count() * kSystemPointerSize; + + if (fp_mode == SaveFPRegsMode::kSave) { + bytes += kCallerSavedFPU.Count() * kDoubleSize; + } + + return bytes; +} + +int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, + Register exclusion2, Register exclusion3) { + int bytes = 0; + + RegList exclusions = {exclusion1, exclusion2, exclusion3}; + RegList list = kJSCallerSaved - exclusions; + MultiPush(list); + bytes += list.Count() * kSystemPointerSize; + + if (fp_mode == SaveFPRegsMode::kSave) { + MultiPushFPU(kCallerSavedFPU); + bytes += kCallerSavedFPU.Count() * kDoubleSize; + } + + return bytes; +} + +int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, + Register exclusion2, Register exclusion3) { + int bytes = 0; + if (fp_mode == SaveFPRegsMode::kSave) { + MultiPopFPU(kCallerSavedFPU); + bytes += kCallerSavedFPU.Count() * kDoubleSize; + } + + RegList exclusions = {exclusion1, exclusion2, exclusion3}; + RegList list = kJSCallerSaved - exclusions; + MultiPop(list); + bytes += list.Count() * kSystemPointerSize; + + return bytes; +} + +void TurboAssembler::LoadRoot(Register destination, RootIndex index) { + Ld(destination, + MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); +} + +void TurboAssembler::LoadRoot(Register destination, RootIndex index, + Condition cond, Register src1, + const Operand& src2) { + Label skip; + BranchShort(&skip, NegateCondition(cond), src1, src2); + Ld(destination, + MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); + bind(&skip); +} + +void TurboAssembler::PushCommonFrame(Register marker_reg) { + if (marker_reg.is_valid()) { + Push(ra, fp, marker_reg); + Add64(fp, sp, Operand(kSystemPointerSize)); + } else { + Push(ra, fp); + Mv(fp, sp); + } +} + +void TurboAssembler::PushStandardFrame(Register function_reg) { + int offset = -StandardFrameConstants::kContextOffset; + if (function_reg.is_valid()) { + Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister); + offset += 2 * kSystemPointerSize; + } else { + Push(ra, fp, cp, kJavaScriptCallArgCountRegister); + offset += kSystemPointerSize; + } + Add64(fp, sp, Operand(offset)); +} + +int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { + // The registers are pushed starting with the highest encoding, + // which means that lowest encodings are closest to the stack pointer. + return kSafepointRegisterStackIndexMap[reg_code]; +} + +// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved) +// The register 'object' contains a heap object pointer. The heap object +// tag is shifted away. +void MacroAssembler::RecordWriteField(Register object, int offset, + Register value, RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + DCHECK(!AreAliased(object, value)); + // First, check if a write barrier is even needed. The tests below + // catch stores of Smis. 
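+  // (kSmiTag is 0, so the Smi check below only needs to test the low tag bit
+  // of |value|; RecordWrite asserts this with DCHECK_EQ(0, kSmiTag).)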
+ Label done; + + // Skip the barrier if writing a smi. + if (smi_check == SmiCheck::kInline) { + JumpIfSmi(value, &done); + } + + // Although the object register is tagged, the offset is relative to the start + // of the object, so offset must be a multiple of kTaggedSize. + DCHECK(IsAligned(offset, kTaggedSize)); + + if (FLAG_debug_code) { + Label ok; + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(!AreAliased(object, value, scratch)); + Add64(scratch, object, offset - kHeapObjectTag); + And(scratch, scratch, Operand(kTaggedSize - 1)); + BranchShort(&ok, eq, scratch, Operand(zero_reg)); + Abort(AbortReason::kUnalignedCellInWriteBarrier); + bind(&ok); + } + + RecordWrite(object, Operand(offset - kHeapObjectTag), value, ra_status, + save_fp, remembered_set_action, SmiCheck::kOmit); + + bind(&done); +} + +void TurboAssembler::MaybeSaveRegisters(RegList registers) { + if (registers.is_empty()) return; + MultiPush(registers); +} + +void TurboAssembler::MaybeRestoreRegisters(RegList registers) { + if (registers.is_empty()) return; + MultiPop(registers); +} + +void TurboAssembler::CallEphemeronKeyBarrier(Register object, + Register slot_address, + SaveFPRegsMode fp_mode) { + DCHECK(!AreAliased(object, slot_address)); + RegList registers = + WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address); + MaybeSaveRegisters(registers); + + Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); + Register slot_address_parameter = + WriteBarrierDescriptor::SlotAddressRegister(); + + Push(object); + Push(slot_address); + Pop(slot_address_parameter); + Pop(object_parameter); + + Call(isolate()->builtins()->code_handle( + Builtins::GetEphemeronKeyBarrierStub(fp_mode)), + RelocInfo::CODE_TARGET); + MaybeRestoreRegisters(registers); +} + +void TurboAssembler::CallRecordWriteStubSaveRegisters( + Register object, Register slot_address, + RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, + StubCallMode mode) { + DCHECK(!AreAliased(object, slot_address)); + RegList registers = + WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address); + MaybeSaveRegisters(registers); + + Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); + Register slot_address_parameter = + WriteBarrierDescriptor::SlotAddressRegister(); + + Push(object); + Push(slot_address); + Pop(slot_address_parameter); + Pop(object_parameter); + + CallRecordWriteStub(object_parameter, slot_address_parameter, + remembered_set_action, fp_mode, mode); + + MaybeRestoreRegisters(registers); +} + +void TurboAssembler::CallRecordWriteStub( + Register object, Register slot_address, + RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, + StubCallMode mode) { + // Use CallRecordWriteStubSaveRegisters if the object and slot registers + // need to be caller saved. + DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object); + DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address); + if (mode == StubCallMode::kCallWasmRuntimeStub) { + auto wasm_target = + wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode); + Call(wasm_target, RelocInfo::WASM_STUB_CALL); + } else { + auto builtin = Builtins::GetRecordWriteStub(remembered_set_action, fp_mode); + if (options().inline_offheap_trampolines) { + // Inline the trampoline. 
//qj + RecordCommentForOffHeapTrampoline(builtin); + + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET)); + Call(scratch); + RecordComment("]"); + } else { + Handle code_target = isolate()->builtins()->code_handle(builtin); + Call(code_target, RelocInfo::CODE_TARGET); + } + } +} + +// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved) +// The register 'object' contains a heap object pointer. The heap object +// tag is shifted away. +void MacroAssembler::RecordWrite(Register object, Operand offset, + Register value, RAStatus ra_status, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + DCHECK(!AreAliased(object, value)); + + if (FLAG_debug_code) { + UseScratchRegisterScope temps(this); + Register temp = temps.Acquire(); + DCHECK(!AreAliased(object, value, temp)); + Add64(temp, object, offset); + LoadTaggedPointerField(temp, MemOperand(temp)); + Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, temp, + Operand(value)); + } + + if ((remembered_set_action == RememberedSetAction::kOmit && + !FLAG_incremental_marking) || + FLAG_disable_write_barriers) { + return; + } + + // First, check if a write barrier is even needed. The tests below + // catch stores of smis and stores into the young generation. + Label done; + + if (smi_check == SmiCheck::kInline) { + DCHECK_EQ(0, kSmiTag); + JumpIfSmi(value, &done); + } + + { + UseScratchRegisterScope temps(this); + Register temp = temps.Acquire(); + CheckPageFlag(value, + temp, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + eq, // In RISC-V, it uses cc for a comparison with 0, so if + // no bits are set, and cc is eq, it will branch to done + &done); + + CheckPageFlag(object, + temp, // Used as scratch. + MemoryChunk::kPointersFromHereAreInterestingMask, + eq, // In RISC-V, it uses cc for a comparison with 0, so if + // no bits are set, and cc is eq, it will branch to done + &done); + } + // Record the actual write. + if (ra_status == kRAHasNotBeenSaved) { + push(ra); + } + Register slot_address = WriteBarrierDescriptor::SlotAddressRegister(); + DCHECK(!AreAliased(object, slot_address, value)); + // TODO(cbruni): Turn offset into int. + DCHECK(offset.IsImmediate()); + Add64(slot_address, object, offset); + CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode); + if (ra_status == kRAHasNotBeenSaved) { + pop(ra); + } + if (FLAG_debug_code) li(slot_address, Operand(kZapValue)); + + bind(&done); +} + +// --------------------------------------------------------------------------- +// Instruction macros. 
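The arithmetic macros that follow (Add32/Add64, Sub32/Sub64, the shifts, and the logical ops) pick an RVC compressed encoding when the operands allow it and otherwise fall back to the base ISA, splitting a slightly-out-of-range 12-bit immediate across two addi/addiw instructions. The standalone sketch below is illustrative only; IsInt12 and IsRvcGpr are made-up helper names, not V8 or RISC-V APIs. It checks the two arithmetic facts those paths rely on: the (code & 0b11000) == 0b01000 test selects exactly the registers x8..x15 that the three-bit RVC register fields can name, and any immediate in [-4096, -2049] or [2048, 4094] splits as imm / 2 plus the remainder with both halves fitting a signed 12-bit field.

// Standalone sketch (not part of the patch): sanity-check the constants used
// by the compressed-register and immediate-splitting paths below.
#include <cassert>
#include <cstdint>

static bool IsInt12(int64_t v) { return v >= -2048 && v <= 2047; }

// Three-bit RVC register fields can only encode x8..x15.
static bool IsRvcGpr(int code) { return (code & 0b11000) == 0b01000; }

int main() {
  for (int code = 0; code < 32; ++code) {
    assert(IsRvcGpr(code) == (code >= 8 && code <= 15));
  }
  // Immediates just outside the addi range are emitted as imm/2 followed by
  // imm - imm/2; both halves stay within [-2048, 2047]. 4095 and -4097 would
  // not split this way, which is why the range stops at 4094 and -4096.
  for (int64_t imm = -4096; imm <= 4094; ++imm) {
    if (IsInt12(imm)) continue;
    int64_t lo = imm / 2;   // first addi/addiw
    int64_t hi = imm - lo;  // second addi/addiw
    assert(IsInt12(lo) && IsInt12(hi));
  }
  return 0;
}

The Sub32/Sub64 variants reuse the same paths by negating the immediate, since there is no subi or c.subi encoding, as their inline comments note.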
+ +void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + ((rd.code() & 0b11000) == 0b01000) && + ((rt.rm().code() & 0b11000) == 0b01000)) { + c_addw(rd, rt.rm()); + } else { + addw(rd, rs, rt.rm()); + } + } else { + if (FLAG_riscv_c_extension && is_int6(rt.immediate()) && + (rd.code() == rs.code()) && (rd != zero_reg) && + !MustUseReg(rt.rmode())) { + c_addiw(rd, static_cast(rt.immediate())); + } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) { + addiw(rd, rs, static_cast(rt.immediate())); + } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) || + (2048 <= rt.immediate() && rt.immediate() <= 4094)) { + addiw(rd, rs, rt.immediate() / 2); + addiw(rd, rd, rt.immediate() - (rt.immediate() / 2)); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + addw(rd, rs, scratch); + } + } +} + +void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + (rt.rm() != zero_reg) && (rs != zero_reg)) { + c_add(rd, rt.rm()); + } else { + add(rd, rs, rt.rm()); + } + } else { + if (FLAG_riscv_c_extension && is_int6(rt.immediate()) && + (rd.code() == rs.code()) && (rd != zero_reg) && (rt.immediate() != 0) && + !MustUseReg(rt.rmode())) { + c_addi(rd, static_cast(rt.immediate())); + } else if (FLAG_riscv_c_extension && is_int10(rt.immediate()) && + (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) && + (rd.code() == rs.code()) && (rd == sp) && + !MustUseReg(rt.rmode())) { + c_addi16sp(static_cast(rt.immediate())); + } else if (FLAG_riscv_c_extension && ((rd.code() & 0b11000) == 0b01000) && + (rs == sp) && is_uint10(rt.immediate()) && + (rt.immediate() != 0) && !MustUseReg(rt.rmode())) { + c_addi4spn(rd, static_cast(rt.immediate())); + } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) { + addi(rd, rs, static_cast(rt.immediate())); + } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) || + (2048 <= rt.immediate() && rt.immediate() <= 4094)) { + addi(rd, rs, rt.immediate() / 2); + addi(rd, rd, rt.immediate() - (rt.immediate() / 2)); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + Li(scratch, rt.immediate()); + add(rd, rs, scratch); + } + } +} + +void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + ((rd.code() & 0b11000) == 0b01000) && + ((rt.rm().code() & 0b11000) == 0b01000)) { + c_subw(rd, rt.rm()); + } else { + subw(rd, rs, rt.rm()); + } + } else { + DCHECK(is_int32(rt.immediate())); + if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + (rd != zero_reg) && is_int6(-rt.immediate()) && + !MustUseReg(rt.rmode())) { + c_addiw( + rd, + static_cast( + -rt.immediate())); // No c_subiw instr, use c_addiw(x, y, -imm). + } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) { + addiw(rd, rs, + static_cast( + -rt.immediate())); // No subiw instr, use addiw(x, y, -imm). 
+ } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) || + (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) { + addiw(rd, rs, -rt.immediate() / 2); + addiw(rd, rd, -rt.immediate() - (-rt.immediate() / 2)); + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + if (-rt.immediate() >> 12 == 0 && !MustUseReg(rt.rmode())) { + // Use load -imm and addu when loading -imm generates one instruction. + Li(scratch, -rt.immediate()); + addw(rd, rs, scratch); + } else { + // li handles the relocation. + Li(scratch, rt.immediate()); + subw(rd, rs, scratch); + } + } + } +} + +void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + ((rd.code() & 0b11000) == 0b01000) && + ((rt.rm().code() & 0b11000) == 0b01000)) { + c_sub(rd, rt.rm()); + } else { + sub(rd, rs, rt.rm()); + } + } else if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + (rd != zero_reg) && is_int6(-rt.immediate()) && + (rt.immediate() != 0) && !MustUseReg(rt.rmode())) { + c_addi(rd, + static_cast( + -rt.immediate())); // No c_subi instr, use c_addi(x, y, -imm). + + } else if (FLAG_riscv_c_extension && is_int10(-rt.immediate()) && + (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) && + (rd.code() == rs.code()) && (rd == sp) && + !MustUseReg(rt.rmode())) { + c_addi16sp(static_cast(-rt.immediate())); + } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) { + addi(rd, rs, + static_cast( + -rt.immediate())); // No subi instr, use addi(x, y, -imm). + } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) || + (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) { + addi(rd, rs, -rt.immediate() / 2); + addi(rd, rd, -rt.immediate() - (-rt.immediate() / 2)); + } else { + int li_count = InstrCountForLi64Bit(rt.immediate()); + int li_neg_count = InstrCountForLi64Bit(-rt.immediate()); + if (li_neg_count < li_count && !MustUseReg(rt.rmode())) { + // Use load -imm and add when loading -imm generates one instruction. + DCHECK(rt.immediate() != std::numeric_limits::min()); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, -rt.immediate()); + add(rd, rs, scratch); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + sub(rd, rs, scratch); + } + } +} + +void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + mulw(rd, rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + mulw(rd, rs, scratch); + } +} + +void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + mul(rd, rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + mul(rd, rs, scratch); + } + srai(rd, rd, 32); +} + +void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt, + Register rsz, Register rtz) { + slli(rsz, rs, 32); + if (rt.is_reg()) { + slli(rtz, rt.rm(), 32); + } else { + Li(rtz, rt.immediate() << 32); + } + mulhu(rd, rsz, rtz); + srai(rd, rd, 32); +} + +void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + mul(rd, rs, rt.rm()); + } else { + // li handles the relocation. 
+ UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + mul(rd, rs, scratch); + } +} + +void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + mulh(rd, rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + mulh(rd, rs, scratch); + } +} + +void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) { + if (rt.is_reg()) { + divw(res, rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + divw(res, rs, scratch); + } +} + +void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + remw(rd, rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + remw(rd, rs, scratch); + } +} + +void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + remuw(rd, rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + remuw(rd, rs, scratch); + } +} + +void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + div(rd, rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + div(rd, rs, scratch); + } +} + +void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) { + if (rt.is_reg()) { + divuw(res, rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + divuw(res, rs, scratch); + } +} + +void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) { + if (rt.is_reg()) { + divu(res, rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + divu(res, rs, scratch); + } +} + +void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + rem(rd, rs, rt.rm()); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + rem(rd, rs, scratch); + } +} + +void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + remu(rd, rs, rt.rm()); + } else { + // li handles the relocation. 
+ UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + remu(rd, rs, scratch); + } +} + +void TurboAssembler::And(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + ((rd.code() & 0b11000) == 0b01000) && + ((rt.rm().code() & 0b11000) == 0b01000)) { + c_and(rd, rt.rm()); + } else { + and_(rd, rs, rt.rm()); + } + } else { + if (FLAG_riscv_c_extension && is_int6(rt.immediate()) && + !MustUseReg(rt.rmode()) && (rd.code() == rs.code()) && + ((rd.code() & 0b11000) == 0b01000)) { + c_andi(rd, static_cast(rt.immediate())); + } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) { + andi(rd, rs, static_cast(rt.immediate())); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + and_(rd, rs, scratch); + } + } +} + +void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + ((rd.code() & 0b11000) == 0b01000) && + ((rt.rm().code() & 0b11000) == 0b01000)) { + c_or(rd, rt.rm()); + } else { + or_(rd, rs, rt.rm()); + } + } else { + if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) { + ori(rd, rs, static_cast(rt.immediate())); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + or_(rd, rs, scratch); + } + } +} + +void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + ((rd.code() & 0b11000) == 0b01000) && + ((rt.rm().code() & 0b11000) == 0b01000)) { + c_xor(rd, rt.rm()); + } else { + xor_(rd, rs, rt.rm()); + } + } else { + if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) { + xori(rd, rs, static_cast(rt.immediate())); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Li(scratch, rt.immediate()); + xor_(rd, rs, scratch); + } + } +} + +void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + or_(rd, rs, rt.rm()); + not_(rd, rd); + } else { + Or(rd, rs, rt); + not_(rd, rd); + } +} + +void TurboAssembler::Neg(Register rs, const Operand& rt) { + DCHECK(rt.is_reg()); + neg(rs, rt.rm()); +} + +void TurboAssembler::Seqz(Register rd, const Operand& rt) { + if (rt.is_reg()) { + seqz(rd, rt.rm()); + } else { + li(rd, rt.immediate() == 0); + } +} + +void TurboAssembler::Snez(Register rd, const Operand& rt) { + if (rt.is_reg()) { + snez(rd, rt.rm()); + } else { + li(rd, rt.immediate() != 0); + } +} + +void TurboAssembler::Seq(Register rd, Register rs, const Operand& rt) { + if (rs == zero_reg) { + Seqz(rd, rt); + } else if (IsZero(rt)) { + seqz(rd, rs); + } else { + Sub64(rd, rs, rt); + seqz(rd, rd); + } +} + +void TurboAssembler::Sne(Register rd, Register rs, const Operand& rt) { + if (rs == zero_reg) { + Snez(rd, rt); + } else if (IsZero(rt)) { + snez(rd, rs); + } else { + Sub64(rd, rs, rt); + snez(rd, rd); + } +} + +void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + slt(rd, rs, rt.rm()); + } else { + if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) { + slti(rd, rs, static_cast(rt.immediate())); + } else { + // li handles the relocation. 
+ UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + Li(scratch, rt.immediate()); + slt(rd, rs, scratch); + } + } +} + +void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + sltu(rd, rs, rt.rm()); + } else { + if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) { + sltiu(rd, rs, static_cast(rt.immediate())); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + Li(scratch, rt.immediate()); + sltu(rd, rs, scratch); + } + } +} + +void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + slt(rd, rt.rm(), rs); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + Li(scratch, rt.immediate()); + slt(rd, scratch, rs); + } + xori(rd, rd, 1); +} + +void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + sltu(rd, rt.rm(), rs); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + Li(scratch, rt.immediate()); + sltu(rd, scratch, rs); + } + xori(rd, rd, 1); +} + +void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) { + Slt(rd, rs, rt); + xori(rd, rd, 1); +} + +void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) { + Sltu(rd, rs, rt); + xori(rd, rd, 1); +} + +void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + slt(rd, rt.rm(), rs); + } else { + // li handles the relocation. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + Li(scratch, rt.immediate()); + slt(rd, scratch, rs); + } +} + +void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + sltu(rd, rt.rm(), rs); + } else { + // li handles the relocation. 
+ UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + Li(scratch, rt.immediate()); + sltu(rd, scratch, rs); + } +} + +void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + sllw(rd, rs, rt.rm()); + } else { + uint8_t shamt = static_cast(rt.immediate()); + slliw(rd, rs, shamt); + } +} + +void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + sraw(rd, rs, rt.rm()); + } else { + uint8_t shamt = static_cast(rt.immediate()); + sraiw(rd, rs, shamt); + } +} + +void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + srlw(rd, rs, rt.rm()); + } else { + uint8_t shamt = static_cast(rt.immediate()); + srliw(rd, rs, shamt); + } +} + +void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + sra(rd, rs, rt.rm()); + } else if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + ((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) { + uint8_t shamt = static_cast(rt.immediate()); + c_srai(rd, shamt); + } else { + uint8_t shamt = static_cast(rt.immediate()); + srai(rd, rs, shamt); + } +} + +void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + srl(rd, rs, rt.rm()); + } else if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + ((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) { + uint8_t shamt = static_cast(rt.immediate()); + c_srli(rd, shamt); + } else { + uint8_t shamt = static_cast(rt.immediate()); + srli(rd, rs, shamt); + } +} + +void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + sll(rd, rs, rt.rm()); + } else { + uint8_t shamt = static_cast(rt.immediate()); + if (FLAG_riscv_c_extension && (rd.code() == rs.code()) && + (rd != zero_reg) && (shamt != 0) && is_uint6(shamt)) { + c_slli(rd, shamt); + } else { + slli(rd, rs, shamt); + } + } +} + +void TurboAssembler::Li(Register rd, int64_t imm) { + if (FLAG_riscv_c_extension && (rd != zero_reg) && is_int6(imm)) { + c_li(rd, imm); + } else { + RV_li(rd, imm); + } +} + +void TurboAssembler::Mv(Register rd, const Operand& rt) { + if (FLAG_riscv_c_extension && (rd != zero_reg) && (rt.rm() != zero_reg)) { + c_mv(rd, rt.rm()); + } else { + mv(rd, rt.rm()); + } +} + +void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + if (rt.is_reg()) { + negw(scratch, rt.rm()); + sllw(scratch, rs, scratch); + srlw(rd, rs, rt.rm()); + or_(rd, scratch, rd); + sext_w(rd, rd); + } else { + int64_t ror_value = rt.immediate() % 32; + if (ror_value == 0) { + Mv(rd, rs); + return; + } else if (ror_value < 0) { + ror_value += 32; + } + srliw(scratch, rs, ror_value); + slliw(rd, rs, 32 - ror_value); + or_(rd, scratch, rd); + sext_w(rd, rd); + } +} + +void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + if (rt.is_reg()) { + negw(scratch, rt.rm()); + sll(scratch, rs, scratch); + srl(rd, rs, rt.rm()); + or_(rd, scratch, rd); + } else { + int64_t dror_value = rt.immediate() % 64; + if (dror_value == 0) { + Mv(rd, rs); + return; + } else if (dror_value < 0) { + dror_value += 64; + } + srli(scratch, rs, dror_value); + slli(rd, rs, 
64 - dror_value); + or_(rd, scratch, rd); + } +} + +void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs, + uint8_t sa) { + DCHECK(sa >= 1 && sa <= 31); + UseScratchRegisterScope temps(this); + Register tmp = rd == rt ? temps.Acquire() : rd; + DCHECK(tmp != rt); + slli(tmp, rs, sa); + Add64(rd, rt, tmp); +} + +// ------------Pseudo-instructions------------- +// Change endianness +void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size, + Register scratch) { + DCHECK_NE(scratch, rs); + DCHECK_NE(scratch, rd); + DCHECK(operand_size == 4 || operand_size == 8); + if (operand_size == 4) { + // Uint32_t x1 = 0x00FF00FF; + // x0 = (x0 << 16 | x0 >> 16); + // x0 = (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8)); + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK((rd != t6) && (rs != t6)); + Register x0 = temps.Acquire(); + Register x1 = temps.Acquire(); + Register x2 = scratch; + li(x1, 0x00FF00FF); + slliw(x0, rs, 16); + srliw(rd, rs, 16); + or_(x0, rd, x0); // x0 <- x0 << 16 | x0 >> 16 + and_(x2, x0, x1); // x2 <- x0 & 0x00FF00FF + slliw(x2, x2, 8); // x2 <- (x0 & x1) << 8 + slliw(x1, x1, 8); // x1 <- 0xFF00FF00 + and_(rd, x0, x1); // x0 & 0xFF00FF00 + srliw(rd, rd, 8); + or_(rd, rd, x2); // (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8)) + } else { + // uinx24_t x1 = 0x0000FFFF0000FFFFl; + // uinx24_t x1 = 0x00FF00FF00FF00FFl; + // x0 = (x0 << 32 | x0 >> 32); + // x0 = (x0 & x1) << 16 | (x0 & (x1 << 16)) >> 16; + // x0 = (x0 & x1) << 8 | (x0 & (x1 << 8)) >> 8; + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + DCHECK((rd != t6) && (rs != t6)); + Register x0 = temps.Acquire(); + Register x1 = temps.Acquire(); + Register x2 = scratch; + li(x1, 0x0000FFFF0000FFFFl); + slli(x0, rs, 32); + srli(rd, rs, 32); + or_(x0, rd, x0); // x0 <- x0 << 32 | x0 >> 32 + and_(x2, x0, x1); // x2 <- x0 & 0x0000FFFF0000FFFF + slli(x2, x2, 16); // x2 <- (x0 & 0x0000FFFF0000FFFF) << 16 + slli(x1, x1, 16); // x1 <- 0xFFFF0000FFFF0000 + and_(rd, x0, x1); // rd <- x0 & 0xFFFF0000FFFF0000 + srli(rd, rd, 16); // rd <- x0 & (x1 << 16)) >> 16 + or_(x0, rd, x2); // (x0 & x1) << 16 | (x0 & (x1 << 16)) >> 16; + li(x1, 0x00FF00FF00FF00FFl); + and_(x2, x0, x1); // x2 <- x0 & 0x00FF00FF00FF00FF + slli(x2, x2, 8); // x2 <- (x0 & x1) << 8 + slli(x1, x1, 8); // x1 <- 0xFF00FF00FF00FF00 + and_(rd, x0, x1); + srli(rd, rd, 8); // rd <- (x0 & (x1 << 8)) >> 8 + or_(rd, rd, x2); // (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8)) + } +} + +template +void TurboAssembler::LoadNBytes(Register rd, const MemOperand& rs, + Register scratch) { + DCHECK(rd != rs.rm() && rd != scratch); + DCHECK_LE(NBYTES, 8); + + // load the most significant byte + if (LOAD_SIGNED) { + lb(rd, rs.rm(), rs.offset() + (NBYTES - 1)); + } else { + lbu(rd, rs.rm(), rs.offset() + (NBYTES - 1)); + } + + // load remaining (nbytes-1) bytes from higher to lower + slli(rd, rd, 8 * (NBYTES - 1)); + for (int i = (NBYTES - 2); i >= 0; i--) { + lbu(scratch, rs.rm(), rs.offset() + i); + if (i) slli(scratch, scratch, i * 8); + or_(rd, rd, scratch); + } +} + +template +void TurboAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs, + Register scratch0, + Register scratch1) { + // This function loads nbytes from memory specified by rs and into rs.rm() + DCHECK(rs.rm() != scratch0 && rs.rm() != scratch1 && scratch0 != scratch1); + DCHECK_LE(NBYTES, 8); + + // load the most significant byte + if (LOAD_SIGNED) { + lb(scratch0, rs.rm(), rs.offset() + 
(NBYTES - 1)); + } else { + lbu(scratch0, rs.rm(), rs.offset() + (NBYTES - 1)); + } + + // load remaining (nbytes-1) bytes from higher to lower + slli(scratch0, scratch0, 8 * (NBYTES - 1)); + for (int i = (NBYTES - 2); i >= 0; i--) { + lbu(scratch1, rs.rm(), rs.offset() + i); + if (i) { + slli(scratch1, scratch1, i * 8); + or_(scratch0, scratch0, scratch1); + } else { + // write to rs.rm() when processing the last byte + or_(rs.rm(), scratch0, scratch1); + } + } +} + +template +void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) { + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + + if (NeedAdjustBaseAndOffset(rs, OffsetAccessType::TWO_ACCESSES, NBYTES - 1)) { + // Adjust offset for two accesses and check if offset + 3 fits into int12. + MemOperand source = rs; + Register scratch_base = temps.Acquire(); + DCHECK(scratch_base != rs.rm()); + AdjustBaseAndOffset(&source, scratch_base, OffsetAccessType::TWO_ACCESSES, + NBYTES - 1); + + // Since source.rm() is scratch_base, assume rd != source.rm() + DCHECK(rd != source.rm()); + Register scratch_other = temps.Acquire(); + LoadNBytes(rd, source, scratch_other); + } else { + // no need to adjust base-and-offset + if (rd != rs.rm()) { + Register scratch = temps.Acquire(); + LoadNBytes(rd, rs, scratch); + } else { // rd == rs.rm() + Register scratch = temps.Acquire(); + Register scratch2 = temps.Acquire(); + LoadNBytesOverwritingBaseReg(rs, scratch, scratch2); + } + } +} + +template +void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, + Register scratch_base) { + DCHECK(NBYTES == 4 || NBYTES == 8); + DCHECK_NE(scratch_base, rs.rm()); + BlockTrampolinePoolScope block_trampoline_pool(this); + MemOperand source = rs; + if (NeedAdjustBaseAndOffset(rs, OffsetAccessType::TWO_ACCESSES, NBYTES - 1)) { + // Adjust offset for two accesses and check if offset + 3 fits into int12. + DCHECK(scratch_base != rs.rm()); + AdjustBaseAndOffset(&source, scratch_base, OffsetAccessType::TWO_ACCESSES, + NBYTES - 1); + } + UseScratchRegisterScope temps(this); + Register scratch_other = temps.Acquire(); + Register scratch = temps.Acquire(); + DCHECK(scratch != rs.rm() && scratch_other != scratch && + scratch_other != rs.rm()); + LoadNBytes(scratch, source, scratch_other); + if (NBYTES == 4) + fmv_w_x(frd, scratch); + else + fmv_d_x(frd, scratch); +} + +template +void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs, + Register scratch_other) { + DCHECK(scratch_other != rs.rm()); + DCHECK_LE(NBYTES, 8); + MemOperand source = rs; + UseScratchRegisterScope temps(this); + Register scratch_base = temps.Acquire(); + // Adjust offset for two accesses and check if offset + 3 fits into int12. 
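+  // (The "+ 3" is specific to 4-byte accesses; in general the second access
+  // ends at offset + NBYTES - 1, which is what the check below passes.)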
+ if (NeedAdjustBaseAndOffset(rs, OffsetAccessType::TWO_ACCESSES, NBYTES - 1)) { + DCHECK(scratch_base != rd && scratch_base != rs.rm()); + AdjustBaseAndOffset(&source, scratch_base, OffsetAccessType::TWO_ACCESSES, + NBYTES - 1); + } + + BlockTrampolinePoolScope block_trampoline_pool(this); + if (scratch_other == no_reg) { + if (temps.hasAvailable()) { + scratch_other = temps.Acquire(); + } else { + push(t2); + scratch_other = t2; + } + } + + DCHECK(scratch_other != rd && scratch_other != rs.rm() && + scratch_other != source.rm()); + + sb(rd, source.rm(), source.offset()); + for (size_t i = 1; i <= (NBYTES - 1); i++) { + srli(scratch_other, rd, i * 8); + sb(scratch_other, source.rm(), source.offset() + i); + } + if (scratch_other == t2) { + pop(t2); + } +} + +template +void TurboAssembler::UnalignedFStoreHelper(FPURegister frd, + const MemOperand& rs, + Register scratch) { + DCHECK(NBYTES == 8 || NBYTES == 4); + DCHECK_NE(scratch, rs.rm()); + if (NBYTES == 4) { + fmv_x_w(scratch, frd); + } else { + fmv_x_d(scratch, frd); + } + UnalignedStoreHelper(scratch, rs); +} + +template +void TurboAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs, + Func generator) { + MemOperand source = rs; + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + if (NeedAdjustBaseAndOffset(source)) { + Register scratch = temps.Acquire(); + DCHECK(scratch != rs.rm()); + AdjustBaseAndOffset(&source, scratch); + } + generator(target, source); +} + +template +void TurboAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs, + Func generator) { + MemOperand source = rs; + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + if (NeedAdjustBaseAndOffset(source)) { + Register scratch = temps.Acquire(); + // make sure scratch does not overwrite value + if (std::is_same::value) + DCHECK(scratch.code() != value.code()); + DCHECK(scratch != rs.rm()); + AdjustBaseAndOffset(&source, scratch); + } + generator(value, source); +} + +void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { + UnalignedLoadHelper<4, true>(rd, rs); +} + +void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) { + UnalignedLoadHelper<4, false>(rd, rs); +} + +void TurboAssembler::Usw(Register rd, const MemOperand& rs) { + UnalignedStoreHelper<4>(rd, rs); +} + +void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { + UnalignedLoadHelper<2, true>(rd, rs); +} + +void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { + UnalignedLoadHelper<2, false>(rd, rs); +} + +void TurboAssembler::Ush(Register rd, const MemOperand& rs) { + UnalignedStoreHelper<2>(rd, rs); +} + +void TurboAssembler::Uld(Register rd, const MemOperand& rs) { + UnalignedLoadHelper<8, true>(rd, rs); +} + +// Load consequent 32-bit word pair in 64-bit reg. and put first word in low +// bits, +// second word in high bits. +void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Lwu(rd, rs); + Lw(scratch, MemOperand(rs.rm(), rs.offset() + kSystemPointerSize / 2)); + slli(scratch, scratch, 32); + Add64(rd, rd, scratch); +} + +void TurboAssembler::Usd(Register rd, const MemOperand& rs) { + UnalignedStoreHelper<8>(rd, rs); +} + +// Do 64-bit store as two consequent 32-bit stores to unaligned address. 
+void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Sw(rd, rs); + srai(scratch, rd, 32); + Sw(scratch, MemOperand(rs.rm(), rs.offset() + kSystemPointerSize / 2)); +} + +void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs, + Register scratch) { + DCHECK_NE(scratch, rs.rm()); + UnalignedFLoadHelper<4>(fd, rs, scratch); +} + +void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs, + Register scratch) { + DCHECK_NE(scratch, rs.rm()); + UnalignedFStoreHelper<4>(fd, rs, scratch); +} + +void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs, + Register scratch) { + DCHECK_NE(scratch, rs.rm()); + UnalignedFLoadHelper<8>(fd, rs, scratch); +} + +void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs, + Register scratch) { + DCHECK_NE(scratch, rs.rm()); + UnalignedFStoreHelper<8>(fd, rs, scratch); +} + +void TurboAssembler::Lb(Register rd, const MemOperand& rs) { + auto fn = [this](Register target, const MemOperand& source) { + this->lb(target, source.rm(), source.offset()); + }; + AlignedLoadHelper(rd, rs, fn); +} + +void TurboAssembler::Lbu(Register rd, const MemOperand& rs) { + auto fn = [this](Register target, const MemOperand& source) { + this->lbu(target, source.rm(), source.offset()); + }; + AlignedLoadHelper(rd, rs, fn); +} + +void TurboAssembler::Sb(Register rd, const MemOperand& rs) { + auto fn = [this](Register value, const MemOperand& source) { + this->sb(value, source.rm(), source.offset()); + }; + AlignedStoreHelper(rd, rs, fn); +} + +void TurboAssembler::Lh(Register rd, const MemOperand& rs) { + auto fn = [this](Register target, const MemOperand& source) { + this->lh(target, source.rm(), source.offset()); + }; + AlignedLoadHelper(rd, rs, fn); +} + +void TurboAssembler::Lhu(Register rd, const MemOperand& rs) { + auto fn = [this](Register target, const MemOperand& source) { + this->lhu(target, source.rm(), source.offset()); + }; + AlignedLoadHelper(rd, rs, fn); +} + +void TurboAssembler::Sh(Register rd, const MemOperand& rs) { + auto fn = [this](Register value, const MemOperand& source) { + this->sh(value, source.rm(), source.offset()); + }; + AlignedStoreHelper(rd, rs, fn); +} + +void TurboAssembler::Lw(Register rd, const MemOperand& rs) { + auto fn = [this](Register target, const MemOperand& source) { + if (FLAG_riscv_c_extension && ((target.code() & 0b11000) == 0b01000) && + ((source.rm().code() & 0b11000) == 0b01000) && + is_uint7(source.offset()) && ((source.offset() & 0x3) == 0)) { + this->c_lw(target, source.rm(), source.offset()); + } else if (FLAG_riscv_c_extension && (target != zero_reg) && + is_uint8(source.offset()) && (source.rm() == sp) && + ((source.offset() & 0x3) == 0)) { + this->c_lwsp(target, source.offset()); + } else { + this->lw(target, source.rm(), source.offset()); + } + }; + AlignedLoadHelper(rd, rs, fn); +} + +void TurboAssembler::Lwu(Register rd, const MemOperand& rs) { + auto fn = [this](Register target, const MemOperand& source) { + this->lwu(target, source.rm(), source.offset()); + }; + AlignedLoadHelper(rd, rs, fn); +} + +void TurboAssembler::Sw(Register rd, const MemOperand& rs) { + auto fn = [this](Register value, const MemOperand& source) { + if (FLAG_riscv_c_extension && ((value.code() & 0b11000) == 0b01000) && + ((source.rm().code() & 0b11000) == 0b01000) && + is_uint7(source.offset()) && ((source.offset() & 0x3) == 0)) { + this->c_sw(value, source.rm(), source.offset()); + } else if 
(FLAG_riscv_c_extension && (source.rm() == sp) && + is_uint8(source.offset()) && (((source.offset() & 0x3) == 0))) { + this->c_swsp(value, source.offset()); + } else { + this->sw(value, source.rm(), source.offset()); + } + }; + AlignedStoreHelper(rd, rs, fn); +} + +void TurboAssembler::Ld(Register rd, const MemOperand& rs) { + auto fn = [this](Register target, const MemOperand& source) { + if (FLAG_riscv_c_extension && ((target.code() & 0b11000) == 0b01000) && + ((source.rm().code() & 0b11000) == 0b01000) && + is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) { + this->c_ld(target, source.rm(), source.offset()); + } else if (FLAG_riscv_c_extension && (target != zero_reg) && + is_uint9(source.offset()) && (source.rm() == sp) && + ((source.offset() & 0x7) == 0)) { + this->c_ldsp(target, source.offset()); + } else { + this->ld(target, source.rm(), source.offset()); + } + }; + AlignedLoadHelper(rd, rs, fn); +} + +void TurboAssembler::Sd(Register rd, const MemOperand& rs) { + auto fn = [this](Register value, const MemOperand& source) { + if (FLAG_riscv_c_extension && ((value.code() & 0b11000) == 0b01000) && + ((source.rm().code() & 0b11000) == 0b01000) && + is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) { + this->c_sd(value, source.rm(), source.offset()); + } else if (FLAG_riscv_c_extension && (source.rm() == sp) && + is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) { + this->c_sdsp(value, source.offset()); + } else { + this->sd(value, source.rm(), source.offset()); + } + }; + AlignedStoreHelper(rd, rs, fn); +} + +void TurboAssembler::LoadFloat(FPURegister fd, const MemOperand& src) { + auto fn = [this](FPURegister target, const MemOperand& source) { + this->flw(target, source.rm(), source.offset()); + }; + AlignedLoadHelper(fd, src, fn); +} + +void TurboAssembler::StoreFloat(FPURegister fs, const MemOperand& src) { + auto fn = [this](FPURegister value, const MemOperand& source) { + this->fsw(value, source.rm(), source.offset()); + }; + AlignedStoreHelper(fs, src, fn); +} + +void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) { + auto fn = [this](FPURegister target, const MemOperand& source) { + if (FLAG_riscv_c_extension && ((target.code() & 0b11000) == 0b01000) && + ((source.rm().code() & 0b11000) == 0b01000) && + is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) { + this->c_fld(target, source.rm(), source.offset()); + } else if (FLAG_riscv_c_extension && (source.rm() == sp) && + is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) { + this->c_fldsp(target, source.offset()); + } else { + this->fld(target, source.rm(), source.offset()); + } + }; + AlignedLoadHelper(fd, src, fn); +} + +void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) { + auto fn = [this](FPURegister value, const MemOperand& source) { + if (FLAG_riscv_c_extension && ((value.code() & 0b11000) == 0b01000) && + ((source.rm().code() & 0b11000) == 0b01000) && + is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) { + this->c_fsd(value, source.rm(), source.offset()); + } else if (FLAG_riscv_c_extension && (source.rm() == sp) && + is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) { + this->c_fsdsp(value, source.offset()); + } else { + this->fsd(value, source.rm(), source.offset()); + } + }; + AlignedStoreHelper(fs, src, fn); +} + +void TurboAssembler::Ll(Register rd, const MemOperand& rs) { + bool is_one_instruction = rs.offset() == 0; + if (is_one_instruction) { + lr_w(false, false, rd, rs.rm()); + } else { + 
UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Add64(scratch, rs.rm(), rs.offset()); + lr_w(false, false, rd, scratch); + } +} + +void TurboAssembler::Lld(Register rd, const MemOperand& rs) { + bool is_one_instruction = rs.offset() == 0; + if (is_one_instruction) { + lr_d(false, false, rd, rs.rm()); + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Add64(scratch, rs.rm(), rs.offset()); + lr_d(false, false, rd, scratch); + } +} + +void TurboAssembler::Sc(Register rd, const MemOperand& rs) { + bool is_one_instruction = rs.offset() == 0; + if (is_one_instruction) { + sc_w(false, false, rd, rs.rm(), rd); + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Add64(scratch, rs.rm(), rs.offset()); + sc_w(false, false, rd, scratch, rd); + } +} + +void TurboAssembler::Scd(Register rd, const MemOperand& rs) { + bool is_one_instruction = rs.offset() == 0; + if (is_one_instruction) { + sc_d(false, false, rd, rs.rm(), rd); + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Add64(scratch, rs.rm(), rs.offset()); + sc_d(false, false, rd, scratch, rd); + } +} + +void TurboAssembler::li(Register dst, Handle value, + RelocInfo::Mode rmode) { + // TODO(jgruber,v8:8887): Also consider a root-relative load when generating + // non-isolate-independent code. In many cases it might be cheaper than + // embedding the relocatable value. + if (root_array_available_ && options().isolate_independent_code) { + IndirectLoadConstant(dst, value); + return; + } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) { + EmbeddedObjectIndex index = AddEmbeddedObject(value); + DCHECK(is_uint32(index)); + li(dst, Operand(index, rmode)); + } else { + DCHECK(RelocInfo::IsFullEmbeddedObject(rmode)); + li(dst, Operand(value.address(), rmode)); + } +} + +void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) { + // TODO(jgruber,v8:8887): Also consider a root-relative load when generating + // non-isolate-independent code. In many cases it might be cheaper than + // embedding the relocatable value. 
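+  // With the root array available and isolate-independent code requested,
+  // the external reference is loaded indirectly via the root register rather
+  // than embedded as an absolute 64-bit immediate.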
+ if (root_array_available_ && options().isolate_independent_code) { + IndirectLoadExternalReference(dst, value); + return; + } + li(dst, Operand(value), mode); +} + +void TurboAssembler::li(Register dst, const StringConstantBase* string, + LiFlags mode) { + li(dst, Operand::EmbeddedStringConstant(string), mode); +} + +static inline int InstrCountForLiLower32Bit(int64_t value) { + int64_t Hi20 = ((value + 0x800) >> 12); + int64_t Lo12 = value << 52 >> 52; + if (Hi20 == 0 || Lo12 == 0) { + return 1; + } + return 2; +} + +int TurboAssembler::InstrCountForLi64Bit(int64_t value) { + if (is_int32(value + 0x800)) { + return InstrCountForLiLower32Bit(value); + } else { + return li_estimate(value); + } + UNREACHABLE(); + return INT_MAX; +} + +void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { + DCHECK(!j.is_reg()); + DCHECK(!MustUseReg(j.rmode())); + DCHECK(mode == OPTIMIZE_SIZE); + Li(rd, j.immediate()); +} + +void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { + DCHECK(!j.is_reg()); + BlockTrampolinePoolScope block_trampoline_pool(this); + if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { + UseScratchRegisterScope temps(this); + int count = li_estimate(j.immediate(), temps.hasAvailable()); + int reverse_count = li_estimate(~j.immediate(), temps.hasAvailable()); + if (FLAG_riscv_constant_pool && count >= 4 && reverse_count >= 4) { + // Ld a Address from a constant pool. + RecordEntry((uint64_t)j.immediate(), j.rmode()); + auipc(rd, 0); + // Record a value into constant pool. + ld(rd, rd, 0); + } else { + if ((count - reverse_count) > 1) { + Li(rd, ~j.immediate()); + not_(rd, rd); + } else { + Li(rd, j.immediate()); + } + } + } else if (MustUseReg(j.rmode())) { + int64_t immediate; + if (j.IsHeapObjectRequest()) { + RequestHeapObject(j.heap_object_request()); + immediate = 0; + } else { + immediate = j.immediate(); + } + + RecordRelocInfo(j.rmode(), immediate); + li_ptr(rd, immediate); + } else if (mode == ADDRESS_LOAD) { + // We always need the same number of instructions as we may need to patch + // this code to load another value which may need all 6 instructions. + RecordRelocInfo(j.rmode()); + li_ptr(rd, j.immediate()); + } else { // Always emit the same 48 bit instruction + // sequence. + li_ptr(rd, j.immediate()); + } +} + +static RegList t_regs = {t0, t1, t2, t3, t4, t5, t6}; +static RegList a_regs = {a0, a1, a2, a3, a4, a5, a6, a7}; +static RegList s_regs = {s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11}; + +void TurboAssembler::MultiPush(RegList regs) { + int16_t num_to_push = regs.Count(); + int16_t stack_offset = num_to_push * kSystemPointerSize; + +#define TEST_AND_PUSH_REG(reg) \ + if (regs.has(reg)) { \ + stack_offset -= kSystemPointerSize; \ + Sd(reg, MemOperand(sp, stack_offset)); \ + regs.clear(reg); \ + } + +#define T_REGS(V) V(t6) V(t5) V(t4) V(t3) V(t2) V(t1) V(t0) +#define A_REGS(V) V(a7) V(a6) V(a5) V(a4) V(a3) V(a2) V(a1) V(a0) +#define S_REGS(V) \ + V(s11) V(s10) V(s9) V(s8) V(s7) V(s6) V(s5) V(s4) V(s3) V(s2) V(s1) + + Sub64(sp, sp, Operand(stack_offset)); + + // Certain usage of MultiPush requires that registers are pushed onto the + // stack in a particular: ra, fp, sp, gp, .... 
(basically in the decreasing + // order of register numbers according to MIPS register numbers) + TEST_AND_PUSH_REG(ra); + TEST_AND_PUSH_REG(fp); + TEST_AND_PUSH_REG(sp); + TEST_AND_PUSH_REG(gp); + TEST_AND_PUSH_REG(tp); + if (!(regs & s_regs).is_empty()) { + S_REGS(TEST_AND_PUSH_REG) + } + if (!(regs & a_regs).is_empty()) { + A_REGS(TEST_AND_PUSH_REG) + } + if (!(regs & t_regs).is_empty()) { + T_REGS(TEST_AND_PUSH_REG) + } + + DCHECK(regs.is_empty()); + +#undef TEST_AND_PUSH_REG +#undef T_REGS +#undef A_REGS +#undef S_REGS +} + +void TurboAssembler::MultiPop(RegList regs) { + int16_t stack_offset = 0; + +#define TEST_AND_POP_REG(reg) \ + if (regs.has(reg)) { \ + Ld(reg, MemOperand(sp, stack_offset)); \ + stack_offset += kSystemPointerSize; \ + regs.clear(reg); \ + } + +#define T_REGS(V) V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) +#define A_REGS(V) V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) +#define S_REGS(V) \ + V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) V(s9) V(s10) V(s11) + + // MultiPop pops from the stack in reverse order as MultiPush + if (!(regs & t_regs).is_empty()) { + T_REGS(TEST_AND_POP_REG) + } + if (!(regs & a_regs).is_empty()) { + A_REGS(TEST_AND_POP_REG) + } + if (!(regs & s_regs).is_empty()) { + S_REGS(TEST_AND_POP_REG) + } + TEST_AND_POP_REG(tp); + TEST_AND_POP_REG(gp); + TEST_AND_POP_REG(sp); + TEST_AND_POP_REG(fp); + TEST_AND_POP_REG(ra); + + DCHECK(regs.is_empty()); + + addi(sp, sp, stack_offset); + +#undef TEST_AND_POP_REG +#undef T_REGS +#undef S_REGS +#undef A_REGS +} + +void TurboAssembler::MultiPushFPU(DoubleRegList regs) { + int16_t num_to_push = regs.Count(); + int16_t stack_offset = num_to_push * kDoubleSize; + + Sub64(sp, sp, Operand(stack_offset)); + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { + if ((regs.bits() & (1 << i)) != 0) { + stack_offset -= kDoubleSize; + StoreDouble(FPURegister::from_code(i), MemOperand(sp, stack_offset)); + } + } +} + +void TurboAssembler::MultiPopFPU(DoubleRegList regs) { + int16_t stack_offset = 0; + + for (int16_t i = 0; i < kNumRegisters; i++) { + if ((regs.bits() & (1 << i)) != 0) { + LoadDouble(FPURegister::from_code(i), MemOperand(sp, stack_offset)); + stack_offset += kDoubleSize; + } + } + addi(sp, sp, stack_offset); +} + +void TurboAssembler::ExtractBits(Register rt, Register rs, uint16_t pos, + uint16_t size, bool sign_extend) { + DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size && + pos + size <= 64); + slli(rt, rs, 64 - (pos + size)); + if (sign_extend) { + srai(rt, rt, 64 - size); + } else { + srli(rt, rt, 64 - size); + } +} + +void TurboAssembler::InsertBits(Register dest, Register source, Register pos, + int size) { + DCHECK_LT(size, 64); + UseScratchRegisterScope temps(this); + Register mask = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register source_ = temps.Acquire(); + // Create a mask of the length=size. + li(mask, 1); + slli(mask, mask, size); + addi(mask, mask, -1); + and_(source_, mask, source); + sll(source_, source_, pos); + // Make a mask containing 0's. 0's start at "pos" with length=size. + sll(mask, mask, pos); + not_(mask, mask); + // cut area for insertion of source. + and_(dest, mask, dest); + // insert source + or_(dest, dest, source_); +} + +void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { fneg_s(fd, fs); } + +void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { fneg_d(fd, fs); } + +void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) { + // Convert rs to a FP value in fd. 
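+  // fcvt.d.wu interprets rs as an unsigned 32-bit integer; every uint32 is
+  // exactly representable in a double, so no rounding occurs here.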
+ fcvt_d_wu(fd, rs); +} + +void TurboAssembler::Cvt_d_w(FPURegister fd, Register rs) { + // Convert rs to a FP value in fd. + fcvt_d_w(fd, rs); +} + +void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) { + // Convert rs to a FP value in fd. + fcvt_d_lu(fd, rs); +} + +void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) { + // Convert rs to a FP value in fd. + fcvt_s_wu(fd, rs); +} + +void TurboAssembler::Cvt_s_w(FPURegister fd, Register rs) { + // Convert rs to a FP value in fd. + fcvt_s_w(fd, rs); +} + +void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) { + // Convert rs to a FP value in fd. + fcvt_s_lu(fd, rs); +} + +template +void TurboAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs, + Register result, + CvtFunc fcvt_generator) { + // Save csr_fflags to scratch & clear exception flags + if (result.is_valid()) { + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + + int exception_flags = kInvalidOperation; + csrrci(scratch, csr_fflags, exception_flags); + + // actual conversion instruction + fcvt_generator(this, rd, fs); + + // check kInvalidOperation flag (out-of-range, NaN) + // set result to 1 if normal, otherwise set result to 0 for abnormal + frflags(result); + andi(result, result, exception_flags); + seqz(result, result); // result <-- 1 (normal), result <-- 0 (abnormal) + + // restore csr_fflags + csrw(csr_fflags, scratch); + } else { + // actual conversion instruction + fcvt_generator(this, rd, fs); + } +} + +void TurboAssembler::Clear_if_nan_d(Register rd, FPURegister fs) { + Label no_nan; + feq_d(kScratchReg, fs, fs); + bnez(kScratchReg, &no_nan); + Move(rd, zero_reg); + bind(&no_nan); +} + +void TurboAssembler::Clear_if_nan_s(Register rd, FPURegister fs) { + Label no_nan; + feq_s(kScratchReg, fs, fs); + bnez(kScratchReg, &no_nan); + Move(rd, zero_reg); + bind(&no_nan); +} + +void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_wu_d(dst, src, RTZ); + }); +} + +void TurboAssembler::Trunc_w_d(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_w_d(dst, src, RTZ); + }); +} + +void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_wu_s(dst, src, RTZ); + }); +} + +void TurboAssembler::Trunc_w_s(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_w_s(dst, src, RTZ); + }); +} + +void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_lu_d(dst, src, RTZ); + }); +} + +void TurboAssembler::Trunc_l_d(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_l_d(dst, src, RTZ); + }); +} + +void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_lu_s(dst, src, RTZ); 
+ }); +} + +void TurboAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_l_s(dst, src, RTZ); + }); +} + +void TurboAssembler::Round_w_s(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_w_s(dst, src, RNE); + }); +} + +void TurboAssembler::Round_w_d(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_w_d(dst, src, RNE); + }); +} + +void TurboAssembler::Ceil_w_s(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_w_s(dst, src, RUP); + }); +} + +void TurboAssembler::Ceil_w_d(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_w_d(dst, src, RUP); + }); +} + +void TurboAssembler::Floor_w_s(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_w_s(dst, src, RDN); + }); +} + +void TurboAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) { + RoundFloatingPointToInteger( + rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { + tasm->fcvt_w_d(dst, src, RDN); + }); +} + +// According to JS ECMA specification, for floating-point round operations, if +// the input is NaN, +/-infinity, or +/-0, the same input is returned as the +// rounded result; this differs from behavior of RISCV fcvt instructions (which +// round out-of-range values to the nearest max or min value), therefore special +// handling is needed by NaN, +/-Infinity, +/-0 +template +void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src, + FPURegister fpu_scratch, RoundingMode frm) { + BlockTrampolinePoolScope block_trampoline_pool(this); + UseScratchRegisterScope temps(this); + Register scratch2 = temps.Acquire(); + + DCHECK((std::is_same::value) || (std::is_same::value)); + // Need at least two FPRs, so check against dst == src == fpu_scratch + DCHECK(!(dst == src && dst == fpu_scratch)); + + const int kFloatMantissaBits = + sizeof(F) == 4 ? kFloat32MantissaBits : kFloat64MantissaBits; + const int kFloatExponentBits = + sizeof(F) == 4 ? kFloat32ExponentBits : kFloat64ExponentBits; + const int kFloatExponentBias = + sizeof(F) == 4 ? 
kFloat32ExponentBias : kFloat64ExponentBias; + Label done; + + { + UseScratchRegisterScope temps2(this); + Register scratch = temps2.Acquire(); + // extract exponent value of the source floating-point to scratch + if (std::is_same::value) { + fmv_x_d(scratch, src); + } else { + fmv_x_w(scratch, src); + } + ExtractBits(scratch2, scratch, kFloatMantissaBits, kFloatExponentBits); + } + + // if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits + // in mantissa, the result is the same as src, so move src to dest (to avoid + // generating another branch) + if (dst != src) { + if (std::is_same::value) { + fmv_d(dst, src); + } else { + fmv_s(dst, src); + } + } + { + Label not_NaN; + UseScratchRegisterScope temps2(this); + Register scratch = temps2.Acquire(); + // According to the wasm spec + // (https://webassembly.github.io/spec/core/exec/numerics.html#aux-nans) + // if input is canonical NaN, then output is canonical NaN, and if input is + // any other NaN, then output is any NaN with most significant bit of + // payload is 1. In RISC-V, feq_d will set scratch to 0 if src is a NaN. If + // src is not a NaN, branch to the label and do nothing, but if it is, + // fmin_d will set dst to the canonical NaN. + if (std::is_same::value) { + feq_d(scratch, src, src); + bnez(scratch, ¬_NaN); + fmin_d(dst, src, src); + } else { + feq_s(scratch, src, src); + bnez(scratch, ¬_NaN); + fmin_s(dst, src, src); + } + bind(¬_NaN); + } + + // If real exponent (i.e., scratch2 - kFloatExponentBias) is greater than + // kFloat32MantissaBits, it means the floating-point value has no fractional + // part, thus the input is already rounded, jump to done. Note that, NaN and + // Infinity in floating-point representation sets maximal exponent value, so + // they also satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits), + // and JS round semantics specify that rounding of NaN (Infinity) returns NaN + // (Infinity), so NaN and Infinity are considered rounded value too. + Branch(&done, greater_equal, scratch2, + Operand(kFloatExponentBias + kFloatMantissaBits)); + + // Actual rounding is needed along this path + + // old_src holds the original input, needed for the case of src == dst + FPURegister old_src = src; + if (src == dst) { + DCHECK(fpu_scratch != dst); + Move(fpu_scratch, src); + old_src = fpu_scratch; + } + + // Since only input whose real exponent value is less than kMantissaBits + // (i.e., 23 or 52-bits) falls into this path, the value range of the input + // falls into that of 23- or 53-bit integers. So we round the input to integer + // values, then convert them back to floating-point. + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + if (std::is_same::value) { + fcvt_l_d(scratch, src, frm); + fcvt_d_l(dst, scratch, frm); + } else { + fcvt_w_s(scratch, src, frm); + fcvt_s_w(dst, scratch, frm); + } + } + // A special handling is needed if the input is a very small positive/negative + // number that rounds to zero. JS semantics requires that the rounded result + // retains the sign of the input, so a very small positive (negative) + // floating-point number should be rounded to positive (negative) 0. + // Therefore, we use sign-bit injection to produce +/-0 correctly. 
Instead of + // testing for zero w/ a branch, we just insert sign-bit for everyone on this + // path (this is where old_src is needed) + if (std::is_same::value) { + fsgnj_d(dst, dst, old_src); + } else { + fsgnj_s(dst, dst, old_src); + } + + bind(&done); +} + +// According to JS ECMA specification, for floating-point round operations, if +// the input is NaN, +/-infinity, or +/-0, the same input is returned as the +// rounded result; this differs from behavior of RISCV fcvt instructions (which +// round out-of-range values to the nearest max or min value), therefore special +// handling is needed by NaN, +/-Infinity, +/-0 +template +void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch, RoundingMode frm) { + VU.set(scratch, std::is_same::value ? E32 : E64, m1); + // if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits + // in mantissa, the result is the same as src, so move src to dest (to avoid + // generating another branch) + + // If real exponent (i.e., scratch2 - kFloatExponentBias) is greater than + // kFloat32MantissaBits, it means the floating-point value has no fractional + // part, thus the input is already rounded, jump to done. Note that, NaN and + // Infinity in floating-point representation sets maximal exponent value, so + // they also satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits), + // and JS round semantics specify that rounding of NaN (Infinity) returns NaN + // (Infinity), so NaN and Infinity are considered rounded value too. + const int kFloatMantissaBits = + sizeof(F) == 4 ? kFloat32MantissaBits : kFloat64MantissaBits; + const int kFloatExponentBits = + sizeof(F) == 4 ? kFloat32ExponentBits : kFloat64ExponentBits; + const int kFloatExponentBias = + sizeof(F) == 4 ? kFloat32ExponentBias : kFloat64ExponentBias; + + // slli(rt, rs, 64 - (pos + size)); + // if (sign_extend) { + // srai(rt, rt, 64 - size); + // } else { + // srli(rt, rt, 64 - size); + // } + + li(scratch, 64 - kFloatMantissaBits - kFloatExponentBits); + vsll_vx(v_scratch, src, scratch); + li(scratch, 64 - kFloatExponentBits); + vsrl_vx(v_scratch, v_scratch, scratch); + li(scratch, kFloatExponentBias + kFloatMantissaBits); + vmslt_vx(v0, v_scratch, scratch); + + VU.set(frm); + vmv_vv(dst, src); + if (dst == src) { + vmv_vv(v_scratch, src); + } + vfcvt_x_f_v(dst, src, MaskType::Mask); + vfcvt_f_x_v(dst, dst, MaskType::Mask); + + // A special handling is needed if the input is a very small positive/negative + // number that rounds to zero. JS semantics requires that the rounded result + // retains the sign of the input, so a very small positive (negative) + // floating-point number should be rounded to positive (negative) 0. 
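+  // e.g. with RTZ, an input lane holding -0.4 converts to integer 0 and back
+  // to +0.0; injecting the sign bit of the original input restores -0.0
+  // without a branch.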
+ if (dst == src) { + vfsngj_vv(dst, dst, v_scratch); + } else { + vfsngj_vv(dst, dst, src); + } +} + +void TurboAssembler::Ceil_f(VRegister vdst, VRegister vsrc, Register scratch, + VRegister v_scratch) { + RoundHelper(vdst, vsrc, scratch, v_scratch, RUP); +} + +void TurboAssembler::Ceil_d(VRegister vdst, VRegister vsrc, Register scratch, + VRegister v_scratch) { + RoundHelper(vdst, vsrc, scratch, v_scratch, RUP); +} + +void TurboAssembler::Floor_f(VRegister vdst, VRegister vsrc, Register scratch, + VRegister v_scratch) { + RoundHelper(vdst, vsrc, scratch, v_scratch, RDN); +} + +void TurboAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch, + VRegister v_scratch) { + RoundHelper(vdst, vsrc, scratch, v_scratch, RDN); +} + +void TurboAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch, + VRegister v_scratch) { + RoundHelper(vdst, vsrc, scratch, v_scratch, RTZ); +} + +void TurboAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch, + VRegister v_scratch) { + RoundHelper(vdst, vsrc, scratch, v_scratch, RTZ); +} + +void TurboAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch, + VRegister v_scratch) { + RoundHelper(vdst, vsrc, scratch, v_scratch, RNE); +} + +void TurboAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch, + VRegister v_scratch) { + RoundHelper(vdst, vsrc, scratch, v_scratch, RNE); +} + +void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src, + FPURegister fpu_scratch) { + RoundHelper(dst, src, fpu_scratch, RDN); +} + +void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src, + FPURegister fpu_scratch) { + RoundHelper(dst, src, fpu_scratch, RUP); +} + +void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src, + FPURegister fpu_scratch) { + RoundHelper(dst, src, fpu_scratch, RTZ); +} + +void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src, + FPURegister fpu_scratch) { + RoundHelper(dst, src, fpu_scratch, RNE); +} + +void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src, + FPURegister fpu_scratch) { + RoundHelper(dst, src, fpu_scratch, RDN); +} + +void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src, + FPURegister fpu_scratch) { + RoundHelper(dst, src, fpu_scratch, RUP); +} + +void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src, + FPURegister fpu_scratch) { + RoundHelper(dst, src, fpu_scratch, RTZ); +} + +void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src, + FPURegister fpu_scratch) { + RoundHelper(dst, src, fpu_scratch, RNE); +} + +void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft) { + fmadd_s(fd, fs, ft, fr); +} + +void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft) { + fmadd_d(fd, fs, ft, fr); +} + +void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft) { + fmsub_s(fd, fs, ft, fr); +} + +void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft) { + fmsub_d(fd, fs, ft, fr); +} + +void TurboAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1, + FPURegister cmp2) { + switch (cc) { + case EQ: + feq_s(rd, cmp1, cmp2); + break; + case NE: + feq_s(rd, cmp1, cmp2); + NegateBool(rd, rd); + break; + case LT: + flt_s(rd, cmp1, cmp2); + break; + case GE: + fle_s(rd, cmp2, cmp1); + break; + case LE: + fle_s(rd, cmp1, cmp2); + break; + case GT: + flt_s(rd, cmp2, cmp1); + break; + default: + UNREACHABLE(); + } +} + +void TurboAssembler::CompareF64(Register rd, 
FPUCondition cc, FPURegister cmp1, + FPURegister cmp2) { + switch (cc) { + case EQ: + feq_d(rd, cmp1, cmp2); + break; + case NE: + feq_d(rd, cmp1, cmp2); + NegateBool(rd, rd); + break; + case LT: + flt_d(rd, cmp1, cmp2); + break; + case GE: + fle_d(rd, cmp2, cmp1); + break; + case LE: + fle_d(rd, cmp1, cmp2); + break; + case GT: + flt_d(rd, cmp2, cmp1); + break; + default: + UNREACHABLE(); + } +} + +void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1, + FPURegister cmp2) { + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch = temps.Acquire(); + + feq_s(rd, cmp1, cmp1); // rd <- !isNan(cmp1) + feq_s(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2) + And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2) +} + +void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1, + FPURegister cmp2) { + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch = temps.Acquire(); + + feq_d(rd, cmp1, cmp1); // rd <- !isNan(cmp1) + feq_d(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2) + And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2) +} + +void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1, + FPURegister cmp2) { + CompareIsNotNanF32(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2) + Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2) +} + +void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1, + FPURegister cmp2) { + CompareIsNotNanF64(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2) + Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2) +} + +void TurboAssembler::BranchTrueShortF(Register rs, Label* target) { + Branch(target, not_equal, rs, Operand(zero_reg)); +} + +void TurboAssembler::BranchFalseShortF(Register rs, Label* target) { + Branch(target, equal, rs, Operand(zero_reg)); +} + +void TurboAssembler::BranchTrueF(Register rs, Label* target) { + bool long_branch = + target->is_bound() ? !is_near(target) : is_trampoline_emitted(); + if (long_branch) { + Label skip; + BranchFalseShortF(rs, &skip); + BranchLong(target); + bind(&skip); + } else { + BranchTrueShortF(rs, target); + } +} + +void TurboAssembler::BranchFalseF(Register rs, Label* target) { + bool long_branch = + target->is_bound() ? 
!is_near(target) : is_trampoline_emitted(); + if (long_branch) { + Label skip; + BranchTrueShortF(rs, &skip); + BranchLong(target); + bind(&skip); + } else { + BranchFalseShortF(rs, target); + } +} + +void TurboAssembler::InsertHighWordF64(FPURegister dst, Register src_high) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Register scratch2 = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + + DCHECK(src_high != scratch2 && src_high != scratch); + + fmv_x_d(scratch, dst); + slli(scratch2, src_high, 32); + slli(scratch, scratch, 32); + srli(scratch, scratch, 32); + or_(scratch, scratch, scratch2); + fmv_d_x(dst, scratch); +} + +void TurboAssembler::InsertLowWordF64(FPURegister dst, Register src_low) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Register scratch2 = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + + DCHECK(src_low != scratch && src_low != scratch2); + fmv_x_d(scratch, dst); + slli(scratch2, src_low, 32); + srli(scratch2, scratch2, 32); + srli(scratch, scratch, 32); + slli(scratch, scratch, 32); + or_(scratch, scratch, scratch2); + fmv_d_x(dst, scratch); +} + +void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) { + // Handle special values first. + if (src == bit_cast(0.0f) && has_single_zero_reg_set_) { + if (dst != kDoubleRegZero) fmv_s(dst, kDoubleRegZero); + } else if (src == bit_cast(-0.0f) && has_single_zero_reg_set_) { + Neg_s(dst, kDoubleRegZero); + } else { + if (dst == kDoubleRegZero) { + DCHECK(src == bit_cast(0.0f)); + fmv_w_x(dst, zero_reg); + has_single_zero_reg_set_ = true; + has_double_zero_reg_set_ = false; + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(static_cast(src))); + fmv_w_x(dst, scratch); + } + } +} + +void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) { + // Handle special values first. + if (src == bit_cast(0.0) && has_double_zero_reg_set_) { + if (dst != kDoubleRegZero) fmv_d(dst, kDoubleRegZero); + } else if (src == bit_cast(-0.0) && has_double_zero_reg_set_) { + Neg_d(dst, kDoubleRegZero); + } else { + if (dst == kDoubleRegZero) { + DCHECK(src == bit_cast(0.0)); + fmv_d_x(dst, zero_reg); + has_double_zero_reg_set_ = true; + has_single_zero_reg_set_ = false; + } else { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(src)); + fmv_d_x(dst, scratch); + } + } +} + +void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt, + Condition cond) { + switch (cond) { + case eq: + Seq(rd, rs, rt); + break; + case ne: + Sne(rd, rs, rt); + break; + + // Signed comparison. + case greater: + Sgt(rd, rs, rt); + break; + case greater_equal: + Sge(rd, rs, rt); // rs >= rt + break; + case less: + Slt(rd, rs, rt); // rs < rt + break; + case less_equal: + Sle(rd, rs, rt); // rs <= rt + break; + + // Unsigned comparison. + case Ugreater: + Sgtu(rd, rs, rt); // rs > rt + break; + case Ugreater_equal: + Sgeu(rd, rs, rt); // rs >= rt + break; + case Uless: + Sltu(rd, rs, rt); // rs < rt + break; + case Uless_equal: + Sleu(rd, rs, rt); // rs <= rt + break; + case cc_always: + UNREACHABLE(); + default: + UNREACHABLE(); + } +} + +// dest <- (condition != 0 ? 
zero : dest) +void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, + Register condition) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + seqz(scratch, condition); + // neg + and may be more efficient than mul(dest, dest, scratch) + neg(scratch, scratch); // 0 is still 0, 1 becomes all 1s + and_(dest, dest, scratch); +} + +// dest <- (condition == 0 ? 0 : dest) +void TurboAssembler::LoadZeroIfConditionZero(Register dest, + Register condition) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + snez(scratch, condition); + // neg + and may be more efficient than mul(dest, dest, scratch); + neg(scratch, scratch); // 0 is still 0, 1 becomes all 1s + and_(dest, dest, scratch); +} + +void TurboAssembler::Clz32(Register rd, Register xx) { + // 32 bit unsigned in lower word: count number of leading zeros. + // int n = 32; + // unsigned y; + + // y = x >>16; if (y != 0) { n = n -16; x = y; } + // y = x >> 8; if (y != 0) { n = n - 8; x = y; } + // y = x >> 4; if (y != 0) { n = n - 4; x = y; } + // y = x >> 2; if (y != 0) { n = n - 2; x = y; } + // y = x >> 1; if (y != 0) {rd = n - 2; return;} + // rd = n - x; + + Label L0, L1, L2, L3, L4; + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register x = rd; + Register y = temps.Acquire(); + Register n = temps.Acquire(); + DCHECK(xx != y && xx != n); + Move(x, xx); + li(n, Operand(32)); + srliw(y, x, 16); + BranchShort(&L0, eq, y, Operand(zero_reg)); + Move(x, y); + addiw(n, n, -16); + bind(&L0); + srliw(y, x, 8); + BranchShort(&L1, eq, y, Operand(zero_reg)); + addiw(n, n, -8); + Move(x, y); + bind(&L1); + srliw(y, x, 4); + BranchShort(&L2, eq, y, Operand(zero_reg)); + addiw(n, n, -4); + Move(x, y); + bind(&L2); + srliw(y, x, 2); + BranchShort(&L3, eq, y, Operand(zero_reg)); + addiw(n, n, -2); + Move(x, y); + bind(&L3); + srliw(y, x, 1); + subw(rd, n, x); + BranchShort(&L4, eq, y, Operand(zero_reg)); + addiw(rd, n, -2); + bind(&L4); +} + +void TurboAssembler::Clz64(Register rd, Register xx) { + // 64 bit: count number of leading zeros. 
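+  // Same shift-and-test cascade as Clz32 above, with an additional initial
+  // 32-bit step.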
+ // int n = 64; + // unsigned y; + + // y = x >>32; if (y != 0) { n = n - 32; x = y; } + // y = x >>16; if (y != 0) { n = n - 16; x = y; } + // y = x >> 8; if (y != 0) { n = n - 8; x = y; } + // y = x >> 4; if (y != 0) { n = n - 4; x = y; } + // y = x >> 2; if (y != 0) { n = n - 2; x = y; } + // y = x >> 1; if (y != 0) {rd = n - 2; return;} + // rd = n - x; + + Label L0, L1, L2, L3, L4, L5; + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register x = rd; + Register y = temps.Acquire(); + Register n = temps.Acquire(); + DCHECK(xx != y && xx != n); + Move(x, xx); + li(n, Operand(64)); + srli(y, x, 32); + BranchShort(&L0, eq, y, Operand(zero_reg)); + addiw(n, n, -32); + Move(x, y); + bind(&L0); + srli(y, x, 16); + BranchShort(&L1, eq, y, Operand(zero_reg)); + addiw(n, n, -16); + Move(x, y); + bind(&L1); + srli(y, x, 8); + BranchShort(&L2, eq, y, Operand(zero_reg)); + addiw(n, n, -8); + Move(x, y); + bind(&L2); + srli(y, x, 4); + BranchShort(&L3, eq, y, Operand(zero_reg)); + addiw(n, n, -4); + Move(x, y); + bind(&L3); + srli(y, x, 2); + BranchShort(&L4, eq, y, Operand(zero_reg)); + addiw(n, n, -2); + Move(x, y); + bind(&L4); + srli(y, x, 1); + subw(rd, n, x); + BranchShort(&L5, eq, y, Operand(zero_reg)); + addiw(rd, n, -2); + bind(&L5); +} + +void TurboAssembler::Ctz32(Register rd, Register rs) { + // Convert trailing zeroes to trailing ones, and bits to their left + // to zeroes. + + BlockTrampolinePoolScope block_trampoline_pool(this); + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Add64(scratch, rs, -1); + Xor(rd, scratch, rs); + And(rd, rd, scratch); + // Count number of leading zeroes. + } + Clz32(rd, rd); + { + // Subtract number of leading zeroes from 32 to get number of trailing + // ones. Remember that the trailing ones were formerly trailing zeroes. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, 32); + Sub32(rd, scratch, rd); + } +} + +void TurboAssembler::Ctz64(Register rd, Register rs) { + // Convert trailing zeroes to trailing ones, and bits to their left + // to zeroes. + + BlockTrampolinePoolScope block_trampoline_pool(this); + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Add64(scratch, rs, -1); + Xor(rd, scratch, rs); + And(rd, rd, scratch); + // Count number of leading zeroes. + } + Clz64(rd, rd); + { + // Subtract number of leading zeroes from 64 to get number of trailing + // ones. Remember that the trailing ones were formerly trailing zeroes. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, 64); + Sub64(rd, scratch, rd); + } +} + +void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) { + DCHECK_NE(scratch, rs); + DCHECK_NE(scratch, rd); + // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel + // + // A generalization of the best bit counting method to integers of + // bit-widths up to 128 (parameterized by type T) is this: + // + // v = v - ((v >> 1) & (T)~(T)0/3); // temp + // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp + // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp + // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count + // + // There are algorithms which are faster in the cases where very few + // bits are set but the algorithm here attempts to minimize the total + // number of instructions executed even when a large number of bits + // are set. 
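+  // Illustrative trace for v = 0xF0F0F0F0 (16 bits set):
+  //   after the pair step:   0xA0A0A0A0
+  //   after the nibble step: 0x40404040
+  //   after the byte step:   0x04040404
+  //   (0x04040404 * 0x01010101) >> 24 = 16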
+ // The number of instruction is 20. + // uint32_t B0 = 0x55555555; // (T)~(T)0/3 + // uint32_t B1 = 0x33333333; // (T)~(T)0/15*3 + // uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15 + // uint32_t value = 0x01010101; // (T)~(T)0/255 + + uint32_t shift = 24; + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch2 = temps.Acquire(); + Register value = temps.Acquire(); + DCHECK((rd != value) && (rs != value)); + li(value, 0x01010101); // value = 0x01010101; + li(scratch2, 0x55555555); // B0 = 0x55555555; + Srl32(scratch, rs, 1); + And(scratch, scratch, scratch2); + Sub32(scratch, rs, scratch); + li(scratch2, 0x33333333); // B1 = 0x33333333; + slli(rd, scratch2, 4); + or_(scratch2, scratch2, rd); + And(rd, scratch, scratch2); + Srl32(scratch, scratch, 2); + And(scratch, scratch, scratch2); + Add32(scratch, rd, scratch); + srliw(rd, scratch, 4); + Add32(rd, rd, scratch); + li(scratch2, 0xF); + Mul32(scratch2, value, scratch2); // B2 = 0x0F0F0F0F; + And(rd, rd, scratch2); + Mul32(rd, rd, value); + Srl32(rd, rd, shift); +} + +void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) { + DCHECK_NE(scratch, rs); + DCHECK_NE(scratch, rd); + // uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3 + // uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3 + // uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15 + // uint64_t value = 0x0101010101010101l; // (T)~(T)0/255 + // uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE + + uint64_t shift = 24; + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch2 = temps.Acquire(); + Register value = temps.Acquire(); + DCHECK((rd != value) && (rs != value)); + li(value, 0x1111111111111111l); // value = 0x1111111111111111l; + li(scratch2, 5); + Mul64(scratch2, value, scratch2); // B0 = 0x5555555555555555l; + Srl64(scratch, rs, 1); + And(scratch, scratch, scratch2); + Sub64(scratch, rs, scratch); + li(scratch2, 3); + Mul64(scratch2, value, scratch2); // B1 = 0x3333333333333333l; + And(rd, scratch, scratch2); + Srl64(scratch, scratch, 2); + And(scratch, scratch, scratch2); + Add64(scratch, rd, scratch); + Srl64(rd, scratch, 4); + Add64(rd, rd, scratch); + li(scratch2, 0xF); + li(value, 0x0101010101010101l); // value = 0x0101010101010101l; + Mul64(scratch2, value, scratch2); // B2 = 0x0F0F0F0F0F0F0F0Fl; + And(rd, rd, scratch2); + Mul64(rd, rd, value); + srli(rd, rd, 32 + shift); +} + +void TurboAssembler::TryInlineTruncateDoubleToI(Register result, + DoubleRegister double_input, + Label* done) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + // if scratch == 1, exception happens during truncation + Trunc_w_d(result, double_input, scratch); + // If we had no exceptions (i.e., scratch==1) we are done. + Branch(done, eq, scratch, Operand(1)); +} + +void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, + Register result, + DoubleRegister double_input, + StubCallMode stub_mode) { + Label done; + + TryInlineTruncateDoubleToI(result, double_input, &done); + + // If we fell through then inline version didn't succeed - call stub + // instead. + push(ra); + Sub64(sp, sp, Operand(kDoubleSize)); // Put input on stack. 
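+  // The DoubleToI stub takes its input from this stack slot and writes the
+  // truncated result back to the same slot, which is reloaded into `result`
+  // below.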
+ fsd(double_input, sp, 0); + + if (stub_mode == StubCallMode::kCallWasmRuntimeStub) { + Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL); + } else { + Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET); + } + ld(result, sp, 0); + + Add64(sp, sp, Operand(kDoubleSize)); + pop(ra); + + bind(&done); +} + +// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. +#define BRANCH_ARGS_CHECK(cond, rs, rt) \ + DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \ + (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg))) + +void TurboAssembler::Branch(int32_t offset) { + DCHECK(is_int21(offset)); + BranchShort(offset); +} + +void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs, + const Operand& rt, Label::Distance near_jump) { + bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt); + DCHECK(is_near); + USE(is_near); +} + +void TurboAssembler::Branch(Label* L) { + if (L->is_bound()) { + if (is_near(L)) { + BranchShort(L); + } else { + BranchLong(L); + } + } else { + if (is_trampoline_emitted()) { + BranchLong(L); + } else { + BranchShort(L); + } + } +} + +void TurboAssembler::Branch(Label* L, Condition cond, Register rs, + const Operand& rt, Label::Distance near_jump) { + if (L->is_bound()) { + if (!BranchShortCheck(0, L, cond, rs, rt)) { + if (cond != cc_always) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + BranchLong(L); + bind(&skip); + } else { + BranchLong(L); + EmitConstPoolWithJumpIfNeeded(); + } + } + } else { + if (is_trampoline_emitted() && near_jump == Label::Distance::kFar) { + if (cond != cc_always) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + BranchLong(L); + bind(&skip); + } else { + BranchLong(L); + EmitConstPoolWithJumpIfNeeded(); + } + } else { + BranchShort(L, cond, rs, rt); + } + } +} + +void TurboAssembler::Branch(Label* L, Condition cond, Register rs, + RootIndex index) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + LoadRoot(scratch, index); + Branch(L, cond, rs, Operand(scratch)); +} + +void TurboAssembler::BranchShortHelper(int32_t offset, Label* L) { + DCHECK(L == nullptr || offset == 0); + offset = GetOffset(offset, L, OffsetSize::kOffset21); + j(offset); +} + +void TurboAssembler::BranchShort(int32_t offset) { + DCHECK(is_int21(offset)); + BranchShortHelper(offset, nullptr); +} + +void TurboAssembler::BranchShort(Label* L) { BranchShortHelper(0, L); } + +int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { + if (L) { + offset = branch_offset_helper(L, bits); + } else { + DCHECK(is_intn(offset, bits)); + } + return offset; +} + +Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, + Register scratch) { + Register r2 = no_reg; + if (rt.is_reg()) { + r2 = rt.rm(); + } else { + r2 = scratch; + li(r2, rt); + } + + return r2; +} + +bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, + OffsetSize bits) { + if (!is_near(L, bits)) return false; + *offset = GetOffset(*offset, L, bits); + return true; +} + +bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt) { + if (!is_near(L, bits)) return false; + *scratch = GetRtAsRegisterHelper(rt, *scratch); + *offset = GetOffset(*offset, L, bits); + return true; +} + +bool TurboAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt) { + 
DCHECK(L == nullptr || offset == 0); + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register scratch = no_reg; + if (!rt.is_reg()) { + scratch = temps.Acquire(); + li(scratch, rt); + } else { + scratch = rt.rm(); + } + { + BlockTrampolinePoolScope block_trampoline_pool(this); + switch (cond) { + case cc_always: + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; + j(offset); + EmitConstPoolWithJumpIfNeeded(); + break; + case eq: + // rs == rt + if (rt.is_reg() && rs == rt.rm()) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; + j(offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false; + beq(rs, scratch, offset); + } + break; + case ne: + // rs != rt + if (rt.is_reg() && rs == rt.rm()) { + break; // No code needs to be emitted + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false; + bne(rs, scratch, offset); + } + break; + + // Signed comparison. + case greater: + // rs > rt + if (rt.is_reg() && rs == rt.rm()) { + break; // No code needs to be emitted. + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false; + bgt(rs, scratch, offset); + } + break; + case greater_equal: + // rs >= rt + if (rt.is_reg() && rs == rt.rm()) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; + j(offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false; + bge(rs, scratch, offset); + } + break; + case less: + // rs < rt + if (rt.is_reg() && rs == rt.rm()) { + break; // No code needs to be emitted. + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false; + blt(rs, scratch, offset); + } + break; + case less_equal: + // rs <= rt + if (rt.is_reg() && rs == rt.rm()) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; + j(offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false; + ble(rs, scratch, offset); + } + break; + + // Unsigned comparison. + case Ugreater: + // rs > rt + if (rt.is_reg() && rs == rt.rm()) { + break; // No code needs to be emitted. + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false; + bgtu(rs, scratch, offset); + } + break; + case Ugreater_equal: + // rs >= rt + if (rt.is_reg() && rs == rt.rm()) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; + j(offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false; + bgeu(rs, scratch, offset); + } + break; + case Uless: + // rs < rt + if (rt.is_reg() && rs == rt.rm()) { + break; // No code needs to be emitted. 
+ } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false; + bltu(rs, scratch, offset); + } + break; + case Uless_equal: + // rs <= rt + if (rt.is_reg() && rs == rt.rm()) { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; + j(offset); + } else { + if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false; + bleu(rs, scratch, offset); + } + break; + default: + UNREACHABLE(); + } + } + + CheckTrampolinePoolQuick(1); + return true; +} + +bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt) { + BRANCH_ARGS_CHECK(cond, rs, rt); + + if (!L) { + DCHECK(is_int13(offset)); + return BranchShortHelper(offset, nullptr, cond, rs, rt); + } else { + DCHECK_EQ(offset, 0); + return BranchShortHelper(0, L, cond, rs, rt); + } +} + +void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs, + const Operand& rt) { + BranchShortCheck(offset, nullptr, cond, rs, rt); +} + +void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs, + const Operand& rt) { + BranchShortCheck(0, L, cond, rs, rt); +} + +void TurboAssembler::BranchAndLink(int32_t offset) { + BranchAndLinkShort(offset); +} + +void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, + const Operand& rt) { + bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt); + DCHECK(is_near); + USE(is_near); +} + +void TurboAssembler::BranchAndLink(Label* L) { + if (L->is_bound()) { + if (is_near(L)) { + BranchAndLinkShort(L); + } else { + BranchAndLinkLong(L); + } + } else { + if (is_trampoline_emitted()) { + BranchAndLinkLong(L); + } else { + BranchAndLinkShort(L); + } + } +} + +void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, + const Operand& rt) { + if (L->is_bound()) { + if (!BranchAndLinkShortCheck(0, L, cond, rs, rt)) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + BranchAndLinkLong(L); + bind(&skip); + } + } else { + if (is_trampoline_emitted()) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + BranchAndLinkLong(L); + bind(&skip); + } else { + BranchAndLinkShortCheck(0, L, cond, rs, rt); + } + } +} + +void TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L) { + DCHECK(L == nullptr || offset == 0); + offset = GetOffset(offset, L, OffsetSize::kOffset21); + jal(offset); +} + +void TurboAssembler::BranchAndLinkShort(int32_t offset) { + DCHECK(is_int21(offset)); + BranchAndLinkShortHelper(offset, nullptr); +} + +void TurboAssembler::BranchAndLinkShort(Label* L) { + BranchAndLinkShortHelper(0, L); +} + +// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly +// with the slt instructions. We could use sub or add instead but we would miss +// overflow cases, so we keep slt and add an intermediate third instruction. 
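+// A conditional branch-and-link is emitted as a branch on the negated
+// condition over the jal, since RISC-V has no conditional call instruction:
+//   bxx  (negated cond), rs, rt, +2 instrs   ; skip the call if cond fails
+//   jal  offset                              ; branch and link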
+bool TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L, + Condition cond, Register rs, + const Operand& rt) { + DCHECK(L == nullptr || offset == 0); + if (!is_near(L, OffsetSize::kOffset21)) return false; + + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + + if (cond == cc_always) { + offset = GetOffset(offset, L, OffsetSize::kOffset21); + jal(offset); + } else { + Branch(kInstrSize * 2, NegateCondition(cond), rs, + Operand(GetRtAsRegisterHelper(rt, scratch))); + offset = GetOffset(offset, L, OffsetSize::kOffset21); + jal(offset); + } + + return true; +} + +bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, + Condition cond, Register rs, + const Operand& rt) { + BRANCH_ARGS_CHECK(cond, rs, rt); + + if (!L) { + DCHECK(is_int21(offset)); + return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt); + } else { + DCHECK_EQ(offset, 0); + return BranchAndLinkShortHelper(0, L, cond, rs, rt); + } +} + +void TurboAssembler::LoadFromConstantsTable(Register destination, + int constant_index) { + DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); + LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); + LoadTaggedPointerField( + destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt( + constant_index))); +} + +void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { + Ld(destination, MemOperand(kRootRegister, offset)); +} + +void TurboAssembler::LoadRootRegisterOffset(Register destination, + intptr_t offset) { + if (offset == 0) { + Move(destination, kRootRegister); + } else { + Add64(destination, kRootRegister, Operand(offset)); + } +} + +void TurboAssembler::Jump(Register target, Condition cond, Register rs, + const Operand& rt) { + BlockTrampolinePoolScope block_trampoline_pool(this); + if (cond == cc_always) { + jr(target); + ForceConstantPoolEmissionWithoutJump(); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(kInstrSize * 2, NegateCondition(cond), rs, rt); + jr(target); + } +} + +void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, + Condition cond, Register rs, const Operand& rt) { + Label skip; + if (cond != cc_always) { + Branch(&skip, NegateCondition(cond), rs, rt); + } + { + BlockTrampolinePoolScope block_trampoline_pool(this); + li(t6, Operand(target, rmode)); + Jump(t6, al, zero_reg, Operand(zero_reg)); + EmitConstPoolWithJumpIfNeeded(); + bind(&skip); + } +} + +void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, + Register rs, const Operand& rt) { + DCHECK(!RelocInfo::IsCodeTarget(rmode)); + Jump(static_cast(target), rmode, cond, rs, rt); +} + +void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, + Condition cond, Register rs, const Operand& rt) { + DCHECK(RelocInfo::IsCodeTarget(rmode)); + + BlockTrampolinePoolScope block_trampoline_pool(this); + Builtin builtin = Builtin::kNoBuiltinId; + bool target_is_isolate_independent_builtin = + isolate()->builtins()->IsBuiltinHandle(code, &builtin) && + Builtins::IsIsolateIndependent(builtin); + if (target_is_isolate_independent_builtin && + options().use_pc_relative_calls_and_jumps) { + int32_t code_target_index = AddCodeTarget(code); + Label skip; + BlockTrampolinePoolScope block_trampoline_pool(this); + if (cond != al) { + Branch(&skip, NegateCondition(cond), rs, rt); + } + RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET); + GenPCRelativeJump(t6, code_target_index); + bind(&skip); + return; + } 
else if (root_array_available_ && options().isolate_independent_code && + target_is_isolate_independent_builtin) { + int offset = static_cast(code->builtin_id()) * kSystemPointerSize + + IsolateData::builtin_entry_table_offset(); + Ld(t6, MemOperand(kRootRegister, offset)); + Jump(t6, cond, rs, rt); + return; + } else if (options().inline_offheap_trampolines && + target_is_isolate_independent_builtin) { + // Inline the trampoline. + RecordCommentForOffHeapTrampoline(builtin); + li(t6, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET)); + Jump(t6, cond, rs, rt); + RecordComment("]"); + return; + } + + int32_t target_index = AddCodeTarget(code); + Jump(static_cast(target_index), rmode, cond, rs, rt); +} + +void TurboAssembler::Jump(const ExternalReference& reference) { + li(t6, reference); + Jump(t6); +} + +// Note: To call gcc-compiled C code on riscv64, you must call through t6. +void TurboAssembler::Call(Register target, Condition cond, Register rs, + const Operand& rt) { + BlockTrampolinePoolScope block_trampoline_pool(this); + if (cond == cc_always) { + jalr(ra, target, 0); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(kInstrSize * 2, NegateCondition(cond), rs, rt); + jalr(ra, target, 0); + } +} + +void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, + unsigned higher_limit, + Label* on_in_range) { + if (lower_limit != 0) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Sub64(scratch, value, Operand(lower_limit)); + Branch(on_in_range, Uless_equal, scratch, + Operand(higher_limit - lower_limit)); + } else { + Branch(on_in_range, Uless_equal, value, + Operand(higher_limit - lower_limit)); + } +} + +void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, + Register rs, const Operand& rt) { + li(t6, Operand(static_cast(target), rmode), ADDRESS_LOAD); + Call(t6, cond, rs, rt); +} + +void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, + Condition cond, Register rs, const Operand& rt) { + Builtin builtin = Builtin::kNoBuiltinId; + bool target_is_isolate_independent_builtin = + isolate()->builtins()->IsBuiltinHandle(code, &builtin) && + Builtins::IsIsolateIndependent(builtin); + if (target_is_isolate_independent_builtin && + options().use_pc_relative_calls_and_jumps) { + int32_t code_target_index = AddCodeTarget(code); + Label skip; + BlockTrampolinePoolScope block_trampoline_pool(this); + RecordCommentForOffHeapTrampoline(builtin); + if (cond != al) { + Branch(&skip, NegateCondition(cond), rs, rt); + } + RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET); + GenPCRelativeJumpAndLink(t6, code_target_index); + bind(&skip); + RecordComment("]"); + return; + } else if (root_array_available_ && options().isolate_independent_code && + target_is_isolate_independent_builtin) { + int offset = static_cast(code->builtin_id()) * kSystemPointerSize + + IsolateData::builtin_entry_table_offset(); + LoadRootRelative(t6, offset); + Call(t6, cond, rs, rt); + return; + } else if (options().inline_offheap_trampolines && + target_is_isolate_independent_builtin) { + // Inline the trampoline. + RecordCommentForOffHeapTrampoline(builtin); + li(t6, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET)); + Call(t6, cond, rs, rt); + RecordComment("]"); + return; + } + + DCHECK(RelocInfo::IsCodeTarget(rmode)); + DCHECK(code->IsExecutable()); + int32_t target_index = AddCodeTarget(code); + Call(static_cast
<Address>
(target_index), rmode, cond, rs, rt); +} + +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin) { + STATIC_ASSERT(kSystemPointerSize == 8); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); + + // The builtin register contains the builtin index as a Smi. + SmiUntag(builtin, builtin); + CalcScaledAddress(builtin, kRootRegister, builtin, kSystemPointerSizeLog2); + Ld(builtin, MemOperand(builtin, IsolateData::builtin_entry_table_offset())); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin) { + LoadEntryFromBuiltinIndex(builtin); + Call(builtin); +} + +void TurboAssembler::CallBuiltin(Builtin builtin) { + RecordCommentForOffHeapTrampoline(builtin); + if (options().short_builtin_calls) { + Call(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY); + } else { + Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET); + } + RecordComment("]"); +} + +void TurboAssembler::TailCallBuiltin(Builtin builtin) { + RecordCommentForOffHeapTrampoline(builtin); + if (options().short_builtin_calls) { + Jump(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY); + } else { + Jump(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET); + } + RecordComment("]"); +} + +void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, + Register destination) { + Ld(destination, EntryFromBuiltinAsOperand(builtin)); +} + +MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { + DCHECK(root_array_available()); + return MemOperand(kRootRegister, + IsolateData::BuiltinEntrySlotOffset(builtin)); +} + +void TurboAssembler::PatchAndJump(Address target) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + auipc(scratch, 0); // Load PC into scratch + Ld(t6, MemOperand(scratch, kInstrSize * 4)); + jr(t6); + nop(); // For alignment + DCHECK_EQ(reinterpret_cast(pc_) % 8, 0); + *reinterpret_cast(pc_) = target; // pc_ should be align. + pc_ += sizeof(uint64_t); +} + +void TurboAssembler::StoreReturnAddressAndCall(Register target) { + // This generates the final instruction sequence for calls to C functions + // once an exit frame has been constructed. + // + // Note that this assumes the caller code (i.e. the Code object currently + // being generated) is immovable or that the callee function cannot trigger + // GC, since the callee function will return to it. + // + // Compute the return address in lr to return to after the jump below. The + // pc is already at '+ 8' from the current instruction; but return is after + // three instructions, so add another 4 to pc to get the return address. + // + Assembler::BlockTrampolinePoolScope block_trampoline_pool(this); + int kNumInstructionsToJump = 5; + if (FLAG_riscv_c_extension) kNumInstructionsToJump = 4; + Label find_ra; + // Adjust the value in ra to point to the correct return location, one + // instruction past the real call into C code (the jalr(t6)), and push it. + // This is the return address of the exit frame. + auipc(ra, 0); // Set ra the current PC + bind(&find_ra); + addi(ra, ra, + (kNumInstructionsToJump + 1) * + kInstrSize); // Set ra to insn after the call + + // This spot was reserved in EnterExitFrame. + Sd(ra, MemOperand(sp)); + addi(sp, sp, -kCArgsSlotsSize); + // Stack is still aligned. + + // Call the C routine. + Mv(t6, + target); // Function pointer to t6 to conform to ABI for PIC. + jalr(t6); + // Make sure the stored 'ra' points to this position. 
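+  // The DCHECK below double-checks that exactly kNumInstructionsToJump
+  // instructions were emitted since find_ra, so the offset added to ra above
+  // really does point at the instruction following the jalr(t6).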
+ DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra)); +} + +void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) { + Jump(ra, cond, rs, rt); + if (cond == al) { + ForceConstantPoolEmissionWithoutJump(); + } +} + + +void TurboAssembler::BranchLong(Label* L) { + // Generate position independent long branch. + BlockTrampolinePoolScope block_trampoline_pool(this); + int64_t imm64; + imm64 = branch_long_offset(L); + GenPCRelativeJump(t6, imm64); + EmitConstPoolWithJumpIfNeeded(); +} + +void TurboAssembler::BranchAndLinkLong(Label* L) { + // Generate position independent long branch and link. + BlockTrampolinePoolScope block_trampoline_pool(this); + int64_t imm64; + imm64 = branch_long_offset(L); + GenPCRelativeJumpAndLink(t6, imm64); +} + +void TurboAssembler::DropAndRet(int drop) { + Add64(sp, sp, drop * kSystemPointerSize); + Ret(); +} + +void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1, + const Operand& r2) { + // Both Drop and Ret need to be conditional. + Label skip; + if (cond != cc_always) { + Branch(&skip, NegateCondition(cond), r1, r2); + } + + Drop(drop); + Ret(); + + if (cond != cc_always) { + bind(&skip); + } +} + +void TurboAssembler::Drop(int count, Condition cond, Register reg, + const Operand& op) { + if (count <= 0) { + return; + } + + Label skip; + + if (cond != al) { + Branch(&skip, NegateCondition(cond), reg, op); + } + + Add64(sp, sp, Operand(count * kSystemPointerSize)); + + if (cond != al) { + bind(&skip); + } +} + +void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { + if (scratch == no_reg) { + Xor(reg1, reg1, Operand(reg2)); + Xor(reg2, reg2, Operand(reg1)); + Xor(reg1, reg1, Operand(reg2)); + } else { + Mv(scratch, reg1); + Mv(reg1, reg2); + Mv(reg2, scratch); + } +} + +void TurboAssembler::Call(Label* target) { BranchAndLink(target); } + +void TurboAssembler::LoadAddress(Register dst, Label* target, + RelocInfo::Mode rmode) { + int32_t offset; + if (CalculateOffset(target, &offset, OffsetSize::kOffset32)) { + CHECK(is_int32(offset + 0x800)); + int32_t Hi20 = (((int32_t)offset + 0x800) >> 12); + int32_t Lo12 = (int32_t)offset << 20 >> 20; + BlockTrampolinePoolScope block_trampoline_pool(this); + auipc(dst, Hi20); + addi(dst, dst, Lo12); + } else { + uint64_t address = jump_address(target); + li(dst, Operand(address, rmode), ADDRESS_LOAD); + } +} + +void TurboAssembler::Push(Smi smi) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(smi)); + push(scratch); +} + +void TurboAssembler::PushArray(Register array, Register size, + PushArrayOrder order) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Register scratch2 = temps.Acquire(); + Label loop, entry; + if (order == PushArrayOrder::kReverse) { + Mv(scratch, zero_reg); + jmp(&entry); + bind(&loop); + CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2); + Ld(scratch2, MemOperand(scratch2)); + push(scratch2); + Add64(scratch, scratch, Operand(1)); + bind(&entry); + Branch(&loop, less, scratch, Operand(size)); + } else { + Mv(scratch, size); + jmp(&entry); + bind(&loop); + CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2); + Ld(scratch2, MemOperand(scratch2)); + push(scratch2); + bind(&entry); + Add64(scratch, scratch, Operand(-1)); + Branch(&loop, greater_equal, scratch, Operand(zero_reg)); + } +} + +void TurboAssembler::Push(Handle handle) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + 
li(scratch, Operand(handle)); + push(scratch); +} + +// --------------------------------------------------------------------------- +// Exception handling. + +void MacroAssembler::PushStackHandler() { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize); + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize); + + Push(Smi::zero()); // Padding. + + // Link the current handler as the next handler. + UseScratchRegisterScope temps(this); + Register handler_address = temps.Acquire(); + li(handler_address, + ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate())); + Register handler = temps.Acquire(); + Ld(handler, MemOperand(handler_address)); + push(handler); + + // Set this new handler as the current one. + Sd(sp, MemOperand(handler_address)); +} + +void MacroAssembler::PopStackHandler() { + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(a1); + Add64(sp, sp, + Operand(static_cast(StackHandlerConstants::kSize - + kSystemPointerSize))); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, + ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate())); + Sd(a1, MemOperand(scratch)); +} + +void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, + const DoubleRegister src) { + // Subtracting 0.0 preserves all inputs except for signalling NaNs, which + // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0 + // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0. + fsub_d(dst, src, kDoubleRegZero); +} + +void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) { + Move(dst, fa0); // Reg fa0 is FP return value. +} + +void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) { + Move(dst, fa0); // Reg fa0 is FP first argument value. +} + +void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(fa0, src); } + +void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(fa0, src); } + +void TurboAssembler::MovToFloatParameters(DoubleRegister src1, + DoubleRegister src2) { + const DoubleRegister fparg2 = fa1; + if (src2 == fa0) { + DCHECK(src1 != fparg2); + Move(fparg2, src2); + Move(fa0, src1); + } else { + Move(fa0, src1); + Move(fparg2, src2); + } +} + +// ----------------------------------------------------------------------------- +// JavaScript invokes. + +void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { + DCHECK(root_array_available()); + Isolate* isolate = this->isolate(); + ExternalReference limit = + kind == StackLimitKind::kRealStackLimit + ? ExternalReference::address_of_real_jslimit(isolate) + : ExternalReference::address_of_jslimit(isolate); + DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + + intptr_t offset = + TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + CHECK(is_int32(offset)); + Ld(destination, MemOperand(kRootRegister, static_cast(offset))); +} + +void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1, + Register scratch2, + Label* stack_overflow, Label* done) { + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + DCHECK(stack_overflow != nullptr || done != nullptr); + LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit); + // Make scratch1 the space we have left. 
The stack might already be overflowed
+  // here which will cause scratch1 to become negative.
+  Sub64(scratch1, sp, scratch1);
+  // Check if the arguments will overflow the stack.
+  Sll64(scratch2, num_args, kSystemPointerSizeLog2);
+  // Signed comparison.
+  if (stack_overflow != nullptr) {
+    Branch(stack_overflow, le, scratch1, Operand(scratch2));
+  } else if (done != nullptr) {
+    Branch(done, gt, scratch1, Operand(scratch2));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+void MacroAssembler::InvokePrologue(Register expected_parameter_count,
+                                    Register actual_parameter_count,
+                                    Label* done, InvokeType type) {
+  Label regular_invoke;
+
+  // a0: actual arguments count
+  // a1: function (passed through to callee)
+  // a2: expected arguments count
+
+  DCHECK_EQ(actual_parameter_count, a0);
+  DCHECK_EQ(expected_parameter_count, a2);
+
+  // If the expected parameter count is equal to the adaptor sentinel, no need
+  // to push undefined value as arguments.
+  if (kDontAdaptArgumentsSentinel != 0) {
+    Branch(&regular_invoke, eq, expected_parameter_count,
+           Operand(kDontAdaptArgumentsSentinel));
+  }
+  // If overapplication or if the actual argument count is equal to the
+  // formal parameter count, no need to push extra undefined values.
+  Sub64(expected_parameter_count, expected_parameter_count,
+        actual_parameter_count);
+  Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
+
+  Label stack_overflow;
+  {
+    UseScratchRegisterScope temps(this);
+    StackOverflowCheck(expected_parameter_count, temps.Acquire(),
+                       temps.Acquire(), &stack_overflow);
+  }
+  // Underapplication. Move the arguments already in the stack, including the
+  // receiver and the return address.
+  {
+    Label copy;
+    Register src = a6, dest = a7;
+    Move(src, sp);
+    Sll64(t0, expected_parameter_count, kSystemPointerSizeLog2);
+    Sub64(sp, sp, Operand(t0));
+    // Update stack pointer.
+    Move(dest, sp);
+    Move(t0, actual_parameter_count);
+    bind(&copy);
+    Ld(t1, MemOperand(src, 0));
+    Sd(t1, MemOperand(dest, 0));
+    Sub64(t0, t0, Operand(1));
+    Add64(src, src, Operand(kSystemPointerSize));
+    Add64(dest, dest, Operand(kSystemPointerSize));
+    Branch(&copy, gt, t0, Operand(zero_reg));
+  }
+
+  // Fill remaining expected arguments with undefined values.
+  LoadRoot(t0, RootIndex::kUndefinedValue);
+  {
+    Label loop;
+    bind(&loop);
+    Sd(t0, MemOperand(a7, 0));
+    Sub64(expected_parameter_count, expected_parameter_count, Operand(1));
+    Add64(a7, a7, Operand(kSystemPointerSize));
+    Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
+  }
+  Branch(&regular_invoke);
+
+  bind(&stack_overflow);
+  {
+    FrameScope frame(
+        this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
+    CallRuntime(Runtime::kThrowStackOverflow);
+    break_(0xCC);
+  }
+  bind(&regular_invoke);
+}
+
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+                                    Register expected_parameter_count,
+                                    Register actual_parameter_count) {
+  Label skip_hook;
+  {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    li(scratch,
+       ExternalReference::debug_hook_on_function_call_address(isolate()));
+    Lb(scratch, MemOperand(scratch));
+    Branch(&skip_hook, eq, scratch, Operand(zero_reg));
+  }
+  {
+    // Load receiver to pass it later to DebugOnFunctionCall hook.
+    UseScratchRegisterScope temps(this);
+    Register receiver = temps.Acquire();
+    LoadReceiver(receiver, actual_parameter_count);
+
+    FrameScope frame(
+        this, has_frame() ?
StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL); + SmiTag(expected_parameter_count); + Push(expected_parameter_count); + + SmiTag(actual_parameter_count); + Push(actual_parameter_count); + + if (new_target.is_valid()) { + Push(new_target); + } + Push(fun); + Push(fun); + Push(receiver); + CallRuntime(Runtime::kDebugOnFunctionCall); + Pop(fun); + if (new_target.is_valid()) { + Pop(new_target); + } + + Pop(actual_parameter_count); + SmiUntag(actual_parameter_count); + + Pop(expected_parameter_count); + SmiUntag(expected_parameter_count); + } + bind(&skip_hook); +} + +void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, + Register expected_parameter_count, + Register actual_parameter_count, + InvokeType type) { + // You can't call a function without a valid frame. + DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); + DCHECK_EQ(function, a1); + DCHECK_IMPLIES(new_target.is_valid(), new_target == a3); + + // On function call, call into the debugger if necessary. + CheckDebugHook(function, new_target, expected_parameter_count, + actual_parameter_count); + + // Clear the new.target register if not given. + if (!new_target.is_valid()) { + LoadRoot(a3, RootIndex::kUndefinedValue); + } + + Label done; + InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type); + // We call indirectly through the code field in the function to + // allow recompilation to take effect without changing any of the + // call sites. + Register code = kJavaScriptCallCodeStartRegister; + LoadTaggedPointerField(code, + FieldMemOperand(function, JSFunction::kCodeOffset)); + switch (type) { + case InvokeType::kCall: + CallCodeObject(code); + break; + case InvokeType::kJump: + JumpCodeObject(code); + break; + } + + // Continue here if InvokePrologue does handle the invocation due to + // mismatched parameter counts. + bind(&done); +} + +void MacroAssembler::InvokeFunctionWithNewTarget( + Register function, Register new_target, Register actual_parameter_count, + InvokeType type) { + // You can't call a function without a valid frame. + DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); + + // Contract with called JS functions requires that function is passed in a1. + DCHECK_EQ(function, a1); + Register expected_parameter_count = a2; + { + UseScratchRegisterScope temps(this); + Register temp_reg = temps.Acquire(); + LoadTaggedPointerField( + temp_reg, + FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); + LoadTaggedPointerField( + cp, FieldMemOperand(function, JSFunction::kContextOffset)); + // The argument count is stored as uint16_t + Lhu(expected_parameter_count, + FieldMemOperand(temp_reg, + SharedFunctionInfo::kFormalParameterCountOffset)); + } + InvokeFunctionCode(function, new_target, expected_parameter_count, + actual_parameter_count, type); +} + +void MacroAssembler::InvokeFunction(Register function, + Register expected_parameter_count, + Register actual_parameter_count, + InvokeType type) { + // You can't call a function without a valid frame. + DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); + + // Contract with called JS functions requires that function is passed in a1. + DCHECK_EQ(function, a1); + + // Get the function and setup the context. + LoadTaggedPointerField(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + + InvokeFunctionCode(a1, no_reg, expected_parameter_count, + actual_parameter_count, type); +} + +// --------------------------------------------------------------------------- +// Support functions. 
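+// The two helpers below are used by the Assert* checks further down: they
+// read the instance type out of an object's map, and GetInstanceTypeRange()
+// additionally biases it by a lower limit so that callers can perform a
+// single unsigned range comparison.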
+ +void MacroAssembler::GetObjectType(Register object, Register map, + Register type_reg) { + LoadMap(map, object); + Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); +} + +void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg, + InstanceType lower_limit, + Register range) { + Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); + Sub64(range, type_reg, Operand(lower_limit)); +} +//------------------------------------------------------------------------------ +// Wasm +void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs, + VSew sew, Vlmul lmul) { + VU.set(kScratchReg, sew, lmul); + vmseq_vv(v0, lhs, rhs); + li(kScratchReg, -1); + vmv_vx(dst, zero_reg); + vmerge_vx(dst, kScratchReg, dst); +} + +void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs, + VSew sew, Vlmul lmul) { + VU.set(kScratchReg, sew, lmul); + vmsne_vv(v0, lhs, rhs); + li(kScratchReg, -1); + vmv_vx(dst, zero_reg); + vmerge_vx(dst, kScratchReg, dst); +} + +void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs, + VSew sew, Vlmul lmul) { + VU.set(kScratchReg, sew, lmul); + vmsle_vv(v0, rhs, lhs); + li(kScratchReg, -1); + vmv_vx(dst, zero_reg); + vmerge_vx(dst, kScratchReg, dst); +} + +void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs, + VSew sew, Vlmul lmul) { + VU.set(kScratchReg, sew, lmul); + vmsleu_vv(v0, rhs, lhs); + li(kScratchReg, -1); + vmv_vx(dst, zero_reg); + vmerge_vx(dst, kScratchReg, dst); +} + +void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs, + VSew sew, Vlmul lmul) { + VU.set(kScratchReg, sew, lmul); + vmslt_vv(v0, rhs, lhs); + li(kScratchReg, -1); + vmv_vx(dst, zero_reg); + vmerge_vx(dst, kScratchReg, dst); +} + +void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs, + VSew sew, Vlmul lmul) { + VU.set(kScratchReg, sew, lmul); + vmsltu_vv(v0, rhs, lhs); + li(kScratchReg, -1); + vmv_vx(dst, zero_reg); + vmerge_vx(dst, kScratchReg, dst); +} + +void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) { + uint64_t imm1 = *(reinterpret_cast(imms)); + uint64_t imm2 = *((reinterpret_cast(imms)) + 1); + VU.set(kScratchReg, VSew::E64, Vlmul::m1); + li(kScratchReg, 1); + vmv_vx(v0, kScratchReg); + li(kScratchReg, imm1); + vmerge_vx(dst, kScratchReg, dst); + li(kScratchReg, imm2); + vsll_vi(v0, v0, 1); + vmerge_vx(dst, kScratchReg, dst); +} + +void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx, + MemOperand src) { + if (ts == 8) { + Lbu(kScratchReg2, src); + VU.set(kScratchReg, E64, m1); + li(kScratchReg, 0x1 << laneidx); + vmv_sx(v0, kScratchReg); + VU.set(kScratchReg, E8, m1); + vmerge_vx(dst, kScratchReg2, dst); + } else if (ts == 16) { + Lhu(kScratchReg2, src); + VU.set(kScratchReg, E16, m1); + li(kScratchReg, 0x1 << laneidx); + vmv_sx(v0, kScratchReg); + vmerge_vx(dst, kScratchReg2, dst); + } else if (ts == 32) { + Lwu(kScratchReg2, src); + VU.set(kScratchReg, E32, m1); + li(kScratchReg, 0x1 << laneidx); + vmv_sx(v0, kScratchReg); + vmerge_vx(dst, kScratchReg2, dst); + } else if (ts == 64) { + Ld(kScratchReg2, src); + VU.set(kScratchReg, E64, m1); + li(kScratchReg, 0x1 << laneidx); + vmv_sx(v0, kScratchReg); + vmerge_vx(dst, kScratchReg2, dst); + } else { + UNREACHABLE(); + } +} + +void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx, + MemOperand dst) { + if (sz == 8) { + VU.set(kScratchReg, E8, m1); + vslidedown_vi(kSimd128ScratchReg, src, laneidx); + vmv_xs(kScratchReg, 
kSimd128ScratchReg); + Sb(kScratchReg, dst); + } else if (sz == 16) { + VU.set(kScratchReg, E16, m1); + vslidedown_vi(kSimd128ScratchReg, src, laneidx); + vmv_xs(kScratchReg, kSimd128ScratchReg); + Sh(kScratchReg, dst); + } else if (sz == 32) { + VU.set(kScratchReg, E32, m1); + vslidedown_vi(kSimd128ScratchReg, src, laneidx); + vmv_xs(kScratchReg, kSimd128ScratchReg); + Sw(kScratchReg, dst); + } else { + DCHECK_EQ(sz, 64); + VU.set(kScratchReg, E64, m1); + vslidedown_vi(kSimd128ScratchReg, src, laneidx); + vmv_xs(kScratchReg, kSimd128ScratchReg); + Sd(kScratchReg, dst); + } +} +// ----------------------------------------------------------------------------- +// Runtime calls. + +void TurboAssembler::AddOverflow64(Register dst, Register left, + const Operand& right, Register overflow) { + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register right_reg = no_reg; + Register scratch = temps.Acquire(); + Register scratch2 = temps.Acquire(); + if (!right.is_reg()) { + li(scratch, Operand(right)); + right_reg = scratch; + } else { + right_reg = right.rm(); + } + DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 && + overflow != scratch2); + DCHECK(overflow != left && overflow != right_reg); + if (dst == left || dst == right_reg) { + add(scratch2, left, right_reg); + xor_(overflow, scratch2, left); + xor_(scratch, scratch2, right_reg); + and_(overflow, overflow, scratch); + Mv(dst, scratch2); + } else { + add(dst, left, right_reg); + xor_(overflow, dst, left); + xor_(scratch, dst, right_reg); + and_(overflow, overflow, scratch); + } +} + +void TurboAssembler::SubOverflow64(Register dst, Register left, + const Operand& right, Register overflow) { + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register right_reg = no_reg; + Register scratch = temps.Acquire(); + Register scratch2 = temps.Acquire(); + if (!right.is_reg()) { + li(scratch, Operand(right)); + right_reg = scratch; + } else { + right_reg = right.rm(); + } + + DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 && + overflow != scratch2); + DCHECK(overflow != left && overflow != right_reg); + + if (dst == left || dst == right_reg) { + sub(scratch2, left, right_reg); + xor_(overflow, left, scratch2); + xor_(scratch, left, right_reg); + and_(overflow, overflow, scratch); + Mv(dst, scratch2); + } else { + sub(dst, left, right_reg); + xor_(overflow, left, dst); + xor_(scratch, left, right_reg); + and_(overflow, overflow, scratch); + } +} + +void TurboAssembler::MulOverflow32(Register dst, Register left, + const Operand& right, Register overflow) { + ASM_CODE_COMMENT(this); + UseScratchRegisterScope temps(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Register right_reg = no_reg; + Register scratch = temps.Acquire(); + Register scratch2 = temps.Acquire(); + if (!right.is_reg()) { + li(scratch, Operand(right)); + right_reg = scratch; + } else { + right_reg = right.rm(); + } + + DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 && + overflow != scratch2); + DCHECK(overflow != left && overflow != right_reg); + sext_w(overflow, left); + sext_w(scratch2, right_reg); + + mul(overflow, overflow, scratch2); + sext_w(dst, overflow); + xor_(overflow, overflow, dst); +} + +void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, + SaveFPRegsMode save_doubles) { + ASM_CODE_COMMENT(this); + // All parameters are on the stack. a0 has the return value after call. 
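+  // Note that the runtime function is not called directly: the code below
+  // loads the argument count and the function's external reference and then
+  // calls through the CEntry stub obtained from CodeFactory::CEntry().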
+
+  // If the expected number of arguments of the runtime function is
+  // constant, we check that the actual number of arguments match the
+  // expectation.
+  CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  PrepareCEntryArgs(num_arguments);
+  PrepareCEntryFunction(ExternalReference::Create(f));
+  Handle<Code> code =
+      CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+  Call(code, RelocInfo::CODE_TARGET);
+}
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+  ASM_CODE_COMMENT(this);
+  const Runtime::Function* function = Runtime::FunctionForId(fid);
+  DCHECK_EQ(1, function->result_size);
+  if (function->nargs >= 0) {
+    PrepareCEntryArgs(function->nargs);
+  }
+  JumpToExternalReference(ExternalReference::Create(fid));
+}
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+                                             bool builtin_exit_frame) {
+  ASM_CODE_COMMENT(this);
+  PrepareCEntryFunction(builtin);
+  Handle<Code> code =
+      CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+                          ArgvMode::kStack, builtin_exit_frame);
+  Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
+  // Load an Address from the constant pool.
+  // Record the value into the constant pool.
+  ASM_CODE_COMMENT(this);
+  if (!FLAG_riscv_constant_pool) {
+    li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+  } else {
+    RecordEntry(entry, RelocInfo::OFF_HEAP_TARGET);
+    RecordRelocInfo(RelocInfo::OFF_HEAP_TARGET, entry);
+    auipc(kOffHeapTrampolineRegister, 0);
+    ld(kOffHeapTrampolineRegister, kOffHeapTrampolineRegister, 0);
+  }
+  Jump(kOffHeapTrampolineRegister);
+}
+
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+                                   Label* target_if_cleared) {
+  ASM_CODE_COMMENT(this);
+  Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
+  And(out, in, Operand(~kWeakHeapObjectMask));
+}
+
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+                                          Register scratch1,
+                                          Register scratch2) {
+  DCHECK_GT(value, 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    ASM_CODE_COMMENT(this);
+    // This operation has to be exactly 32-bit wide in case the external
+    // reference table redirects the counter to a uint32_t
+    // dummy_stats_counter_ field.
+    li(scratch2, ExternalReference::Create(counter));
+    Lw(scratch1, MemOperand(scratch2));
+    Add32(scratch1, scratch1, Operand(value));
+    Sw(scratch1, MemOperand(scratch2));
+  }
+}
+
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
+                                          Register scratch1,
+                                          Register scratch2) {
+  DCHECK_GT(value, 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    ASM_CODE_COMMENT(this);
+    // This operation has to be exactly 32-bit wide in case the external
+    // reference table redirects the counter to a uint32_t
+    // dummy_stats_counter_ field.
+    li(scratch2, ExternalReference::Create(counter));
+    Lw(scratch1, MemOperand(scratch2));
+    Sub32(scratch1, scratch1, Operand(value));
+    Sw(scratch1, MemOperand(scratch2));
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Debugging.
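+//
+// Assert() only emits its check when FLAG_debug_code is set, while Check()
+// always emits the comparison; both fall through to Abort() on failure.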
+ +void TurboAssembler::Trap() { stop(); } +void TurboAssembler::DebugBreak() { stop(); } + +void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs, + Operand rt) { + if (FLAG_debug_code) Check(cc, reason, rs, rt); +} + +void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, + Operand rt) { + Label L; + BranchShort(&L, cc, rs, rt); + Abort(reason); + // Will not return here. + bind(&L); +} + +void TurboAssembler::Abort(AbortReason reason) { + Label abort_start; + bind(&abort_start); + if (FLAG_code_comments) { + const char* msg = GetAbortReason(reason); + RecordComment("Abort message: "); + RecordComment(msg); + } + + // Avoid emitting call to builtin if requested. + if (trap_on_abort()) { + ebreak(); + return; + } + + if (should_abort_hard()) { + // We don't care if we constructed a frame. Just pretend we did. + FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE); + PrepareCallCFunction(0, a0); + li(a0, Operand(static_cast(reason))); + CallCFunction(ExternalReference::abort_with_reason(), 1); + return; + } + + Move(a0, Smi::FromInt(static_cast(reason))); + + // Disable stub call restrictions to always allow calls to abort. + if (!has_frame()) { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. + FrameScope scope(this, StackFrame::NO_FRAME_TYPE); + Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET); + } else { + Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET); + } + // Will not return here. + if (is_trampoline_pool_blocked()) { + // If the calling code cares about the exact number of + // instructions generated, we insert padding here to keep the size + // of the Abort macro constant. + // Currently in debug mode with debug_code enabled the number of + // generated instructions is 10, so we use this as a maximum value. 
+ static const int kExpectedAbortInstructions = 10; + int abort_instructions = InstructionsGeneratedSince(&abort_start); + DCHECK_LE(abort_instructions, kExpectedAbortInstructions); + while (abort_instructions++ < kExpectedAbortInstructions) { + nop(); + } + } +} + +void TurboAssembler::LoadMap(Register destination, Register object) { + ASM_CODE_COMMENT(this); + LoadTaggedPointerField(destination, + FieldMemOperand(object, HeapObject::kMapOffset)); +} + +void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { + ASM_CODE_COMMENT(this); + LoadMap(dst, cp); + LoadTaggedPointerField( + dst, FieldMemOperand( + dst, Map::kConstructorOrBackPointerOrNativeContextOffset)); + LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index))); +} + +void TurboAssembler::StubPrologue(StackFrame::Type type) { + ASM_CODE_COMMENT(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(StackFrame::TypeToMarker(type))); + PushCommonFrame(scratch); +} + +void TurboAssembler::Prologue() { PushStandardFrame(a1); } + +void TurboAssembler::EnterFrame(StackFrame::Type type) { + ASM_CODE_COMMENT(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + Push(ra, fp); + Move(fp, sp); + if (!StackFrame::IsJavaScript(type)) { + li(scratch, Operand(StackFrame::TypeToMarker(type))); + Push(scratch); + } +#if V8_ENABLE_WEBASSEMBLY + if (type == StackFrame::WASM) Push(kWasmInstanceRegister); +#endif // V8_ENABLE_WEBASSEMBLY +} + +void TurboAssembler::LeaveFrame(StackFrame::Type type) { + ASM_CODE_COMMENT(this); + addi(sp, fp, 2 * kSystemPointerSize); + Ld(ra, MemOperand(fp, 1 * kSystemPointerSize)); + Ld(fp, MemOperand(fp, 0 * kSystemPointerSize)); +} + +void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, + StackFrame::Type frame_type) { + ASM_CODE_COMMENT(this); + DCHECK(frame_type == StackFrame::EXIT || + frame_type == StackFrame::BUILTIN_EXIT); + + // Set up the frame structure on the stack. + STATIC_ASSERT(2 * kSystemPointerSize == + ExitFrameConstants::kCallerSPDisplacement); + STATIC_ASSERT(1 * kSystemPointerSize == ExitFrameConstants::kCallerPCOffset); + STATIC_ASSERT(0 * kSystemPointerSize == ExitFrameConstants::kCallerFPOffset); + + // This is how the stack will look: + // fp + 2 (==kCallerSPDisplacement) - old stack's end + // [fp + 1 (==kCallerPCOffset)] - saved old ra + // [fp + 0 (==kCallerFPOffset)] - saved old fp + // [fp - 1 StackFrame::EXIT Smi + // [fp - 2 (==kSPOffset)] - sp of the called function + // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the + // new stack (will contain saved ra) + + // Save registers and reserve room for saved entry sp. + addi(sp, sp, + -2 * kSystemPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp); + Sd(ra, MemOperand(sp, 3 * kSystemPointerSize)); + Sd(fp, MemOperand(sp, 2 * kSystemPointerSize)); + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, Operand(StackFrame::TypeToMarker(frame_type))); + Sd(scratch, MemOperand(sp, 1 * kSystemPointerSize)); + } + // Set up new frame pointer. + addi(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp); + + if (FLAG_debug_code) { + Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); + } + + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + // Save the frame pointer and the context in top. 
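+    // ("top" here being the isolate-level c_entry_fp and context slots
+    // addressed via IsolateAddressId below, which let the stack walker find
+    // this exit frame later.)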
+ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, + isolate())); + Sd(fp, MemOperand(scratch)); + li(scratch, + ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); + Sd(cp, MemOperand(scratch)); + } + + const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); + if (save_doubles) { + // The stack is already aligned to 0 modulo 8 for stores with sdc1. + int space = kNumCallerSavedFPU * kDoubleSize; + Sub64(sp, sp, Operand(space)); + int count = 0; + for (int i = 0; i < kNumFPURegisters; i++) { + if (kCallerSavedFPU.bits() & (1 << i)) { + FPURegister reg = FPURegister::from_code(i); + StoreDouble(reg, MemOperand(sp, count * kDoubleSize)); + count++; + } + } + } + + // Reserve place for the return address, stack space and an optional slot + // (used by DirectCEntry to hold the return value if a struct is + // returned) and align the frame preparing for calling the runtime function. + DCHECK_GE(stack_space, 0); + Sub64(sp, sp, Operand((stack_space + 2) * kSystemPointerSize)); + if (frame_alignment > 0) { + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); + And(sp, sp, Operand(-frame_alignment)); // Align stack. + } + + // Set the exit frame sp value to point just before the return address + // location. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + addi(scratch, sp, kSystemPointerSize); + Sd(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); +} + +void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, + bool do_return, + bool argument_count_is_length) { + ASM_CODE_COMMENT(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + BlockTrampolinePoolScope block_trampoline_pool(this); + // Optionally restore all double registers. + if (save_doubles) { + // Remember: we only need to restore kCallerSavedFPU. + Sub64(scratch, fp, + Operand(ExitFrameConstants::kFixedFrameSizeFromFp + + kNumCallerSavedFPU * kDoubleSize)); + int cout = 0; + for (int i = 0; i < kNumFPURegisters; i++) { + if (kCalleeSavedFPU.bits() & (1 << i)) { + FPURegister reg = FPURegister::from_code(i); + LoadDouble(reg, MemOperand(scratch, cout * kDoubleSize)); + cout++; + } + } + } + + // Clear top frame. + li(scratch, + ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate())); + Sd(zero_reg, MemOperand(scratch)); + + // Restore current context from top and clear it in debug mode. + li(scratch, + ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); + Ld(cp, MemOperand(scratch)); + + if (FLAG_debug_code) { + UseScratchRegisterScope temp(this); + Register scratch2 = temp.Acquire(); + li(scratch2, Operand(Context::kInvalidContext)); + Sd(scratch2, MemOperand(scratch)); + } + + // Pop the arguments, restore registers, and return. + Mv(sp, fp); // Respect ABI stack constraint. + Ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); + Ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); + + if (argument_count.is_valid()) { + if (argument_count_is_length) { + add(sp, sp, argument_count); + } else { + CalcScaledAddress(sp, sp, argument_count, kSystemPointerSizeLog2); + } + } + + addi(sp, sp, 2 * kSystemPointerSize); + + if (do_return) { + Ret(); + } +} + +int TurboAssembler::ActivationFrameAlignment() { +#if V8_HOST_ARCH_RISCV64 + // Running on the real platform. Use the alignment as mandated by the local + // environment. 
+ // Note: This will break if we ever start generating snapshots on one RISC-V + // platform for another RISC-V platform with a different alignment. + return base::OS::ActivationFrameAlignment(); +#else // V8_HOST_ARCH_RISCV64 + // If we are using the simulator then we should always align to the expected + // alignment. As the simulator is used to generate snapshots we do not know + // if the target platform will need alignment, so this is controlled from a + // flag. + return FLAG_sim_stack_alignment; +#endif // V8_HOST_ARCH_RISCV64 +} + +void MacroAssembler::AssertStackIsAligned() { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + const int frame_alignment = ActivationFrameAlignment(); + const int frame_alignment_mask = frame_alignment - 1; + + if (frame_alignment > kSystemPointerSize) { + Label alignment_as_expected; + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + andi(scratch, sp, frame_alignment_mask); + BranchShort(&alignment_as_expected, eq, scratch, Operand(zero_reg)); + } + // Don't use Check here, as it will call Runtime_Abort re-entering here. + ebreak(); + bind(&alignment_as_expected); + } + } +} + +void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { + ASM_CODE_COMMENT(this); + if (SmiValuesAre32Bits()) { + Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset()))); + } else { + DCHECK(SmiValuesAre31Bits()); + if (COMPRESS_POINTERS_BOOL) { + Lw(dst, src); + } else { + Ld(dst, src); + } + SmiUntag(dst); + } +} + +void TurboAssembler::SmiToInt32(Register smi) { + ASM_CODE_COMMENT(this); + if (FLAG_enable_slow_asserts) { + AssertSmi(smi); + } + DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); + SmiUntag(smi); +} + +void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) { + ASM_CODE_COMMENT(this); + DCHECK_EQ(0, kSmiTag); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + andi(scratch, value, kSmiTagMask); + Branch(smi_label, eq, scratch, Operand(zero_reg)); +} + +void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) { + ASM_CODE_COMMENT(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK_EQ(0, kSmiTag); + andi(scratch, value, kSmiTagMask); + Branch(not_smi_label, ne, scratch, Operand(zero_reg)); +} + +void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + STATIC_ASSERT(kSmiTag == 0); + DCHECK(object != kScratchReg); + andi(kScratchReg, object, kSmiTagMask); + Check(ne, reason, kScratchReg, Operand(zero_reg)); + } +} + +void TurboAssembler::AssertSmi(Register object, AbortReason reason) { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + STATIC_ASSERT(kSmiTag == 0); + DCHECK(object != kScratchReg); + andi(kScratchReg, object, kSmiTagMask); + Check(eq, reason, kScratchReg, Operand(zero_reg)); + } +} + +void MacroAssembler::AssertConstructor(Register object) { + if (FLAG_debug_code) { + ASM_CODE_COMMENT(this); + DCHECK(object != kScratchReg); + BlockTrampolinePoolScope block_trampoline_pool(this); + STATIC_ASSERT(kSmiTag == 0); + SmiTst(object, kScratchReg); + Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, kScratchReg, + Operand(zero_reg)); + + LoadMap(kScratchReg, object); + Lbu(kScratchReg, FieldMemOperand(kScratchReg, Map::kBitFieldOffset)); + And(kScratchReg, kScratchReg, Operand(Map::Bits1::IsConstructorBit::kMask)); + Check(ne, AbortReason::kOperandIsNotAConstructor, kScratchReg, + 
Operand(zero_reg));
+  }
+}
+
+void MacroAssembler::AssertFunction(Register object) {
+  if (FLAG_debug_code) {
+    ASM_CODE_COMMENT(this);
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    STATIC_ASSERT(kSmiTag == 0);
+    DCHECK(object != kScratchReg);
+    SmiTst(object, kScratchReg);
+    Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, kScratchReg,
+          Operand(zero_reg));
+    push(object);
+    LoadMap(object, object);
+    UseScratchRegisterScope temps(this);
+    Register range = temps.Acquire();
+    GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, range);
+    Check(Uless_equal, AbortReason::kOperandIsNotAFunction, range,
+          Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+    pop(object);
+  }
+}
+
+void MacroAssembler::AssertCallableFunction(Register object) {
+  if (!FLAG_debug_code) return;
+  ASM_CODE_COMMENT(this);
+  STATIC_ASSERT(kSmiTag == 0);
+  AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
+  push(object);
+  LoadMap(object, object);
+  UseScratchRegisterScope temps(this);
+  Register range = temps.Acquire();
+  GetInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, range);
+  Check(Uless_equal, AbortReason::kOperandIsNotACallableFunction, range,
+        Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
+                FIRST_CALLABLE_JS_FUNCTION_TYPE));
+  pop(object);
+}
+
+void MacroAssembler::AssertBoundFunction(Register object) {
+  if (FLAG_debug_code) {
+    ASM_CODE_COMMENT(this);
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    STATIC_ASSERT(kSmiTag == 0);
+    DCHECK(object != kScratchReg);
+    SmiTst(object, kScratchReg);
+    Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, kScratchReg,
+          Operand(zero_reg));
+    GetObjectType(object, kScratchReg, kScratchReg);
+    Check(eq, AbortReason::kOperandIsNotABoundFunction, kScratchReg,
+          Operand(JS_BOUND_FUNCTION_TYPE));
+  }
+}
+
+void MacroAssembler::AssertGeneratorObject(Register object) {
+  if (!FLAG_debug_code) return;
+  ASM_CODE_COMMENT(this);
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  STATIC_ASSERT(kSmiTag == 0);
+  DCHECK(object != kScratchReg);
+  SmiTst(object, kScratchReg);
+  Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, kScratchReg,
+        Operand(zero_reg));
+
+  GetObjectType(object, kScratchReg, kScratchReg);
+
+  Label done;
+
+  // Check if JSGeneratorObject
+  BranchShort(&done, eq, kScratchReg, Operand(JS_GENERATOR_OBJECT_TYPE));
+
+  // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+  BranchShort(&done, eq, kScratchReg, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+
+  // Check if JSAsyncGeneratorObject
+  BranchShort(&done, eq, kScratchReg, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
+
+  Abort(AbortReason::kOperandIsNotAGeneratorObject);
+
+  bind(&done);
+}
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+                                                     Register scratch) {
+  if (FLAG_debug_code) {
+    ASM_CODE_COMMENT(this);
+    Label done_checking;
+    AssertNotSmi(object);
+    LoadRoot(scratch, RootIndex::kUndefinedValue);
+    BranchShort(&done_checking, eq, object, Operand(scratch));
+    GetObjectType(object, scratch, scratch);
+    Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
+           Operand(ALLOCATION_SITE_TYPE));
+    bind(&done_checking);
+  }
+}
+
+template <typename F_TYPE>
+void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
+                                       FPURegister src2, MaxMinKind kind) {
+  DCHECK((std::is_same<F_TYPE, float>::value) ||
+         (std::is_same<F_TYPE, double>::value));
+
+  if (src1 == src2 && dst != src1) {
+    if (std::is_same<F_TYPE, float>::value) {
+      fmv_s(dst, src1);
+    } else {
+      fmv_d(dst, src1);
+    }
+    return;
+  }
+
+  Label done, nan;
+
+  // For RISCV, fmin_s returns the other non-NaN operand as the result if only
+  // one operand is NaN; for JS, however, the result is NaN whenever any
+  // operand is NaN. The code below bridges that discrepancy between the ISA
+  // and the JS semantics.
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  if (std::is_same<F_TYPE, float>::value) {
+    CompareIsNotNanF32(scratch, src1, src2);
+  } else {
+    CompareIsNotNanF64(scratch, src1, src2);
+  }
+  BranchFalseF(scratch, &nan);
+
+  if (kind == MaxMinKind::kMax) {
+    if (std::is_same<F_TYPE, float>::value) {
+      fmax_s(dst, src1, src2);
+    } else {
+      fmax_d(dst, src1, src2);
+    }
+  } else {
+    if (std::is_same<F_TYPE, float>::value) {
+      fmin_s(dst, src1, src2);
+    } else {
+      fmin_d(dst, src1, src2);
+    }
+  }
+  j(&done);
+
+  bind(&nan);
+  // If any operand is NaN, return NaN (fadd returns NaN if any operand is NaN)
+  if (std::is_same<F_TYPE, float>::value) {
+    fadd_s(dst, src1, src2);
+  } else {
+    fadd_d(dst, src1, src2);
+  }
+
+  bind(&done);
+}
+
+void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
+                                FPURegister src2) {
+  ASM_CODE_COMMENT(this);
+  FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMax);
+}
+
+void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
+                                FPURegister src2) {
+  ASM_CODE_COMMENT(this);
+  FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMin);
+}
+
+void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
+                                FPURegister src2) {
+  ASM_CODE_COMMENT(this);
+  FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMax);
+}
+
+void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
+                                FPURegister src2) {
+  ASM_CODE_COMMENT(this);
+  FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMin);
+}
+
+static const int kRegisterPassedArguments = 8;
+
+int TurboAssembler::CalculateStackPassedDWords(int num_gp_arguments,
+                                               int num_fp_arguments) {
+  int stack_passed_dwords = 0;
+
+  // Up to eight integer arguments are passed in registers a0..a7 and
+  // up to eight floating point arguments are passed in registers fa0..fa7.
+  if (num_gp_arguments > kRegisterPassedArguments) {
+    stack_passed_dwords += num_gp_arguments - kRegisterPassedArguments;
+  }
+  if (num_fp_arguments > kRegisterPassedArguments) {
+    stack_passed_dwords += num_fp_arguments - kRegisterPassedArguments;
+  }
+  stack_passed_dwords += kCArgSlotCount;
+  return stack_passed_dwords;
+}
+
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          int num_double_arguments,
+                                          Register scratch) {
+  ASM_CODE_COMMENT(this);
+  int frame_alignment = ActivationFrameAlignment();
+
+  // Up to eight simple arguments in a0..a7, fa0..fa7.
+  // Remaining arguments are pushed on the stack (arg slot calculation handled
+  // by CalculateStackPassedDWords()).
+  int stack_passed_arguments =
+      CalculateStackPassedDWords(num_reg_arguments, num_double_arguments);
+  if (frame_alignment > kSystemPointerSize) {
+    // Make stack end at alignment and make room for stack arguments and the
+    // original value of sp.
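+    // E.g. with 16-byte frame alignment, sp is first moved down far enough to
+    // hold the spilled arguments plus one extra slot, then rounded down to a
+    // 16-byte boundary; the original sp is saved in the slot just above the
+    // argument area and restored from there in CallCFunctionHelper().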
+ Mv(scratch, sp); + Sub64(sp, sp, Operand((stack_passed_arguments + 1) * kSystemPointerSize)); + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); + And(sp, sp, Operand(-frame_alignment)); + Sd(scratch, MemOperand(sp, stack_passed_arguments * kSystemPointerSize)); + } else { + Sub64(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize)); + } +} + +void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, + Register scratch) { + PrepareCallCFunction(num_reg_arguments, 0, scratch); +} + +void TurboAssembler::CallCFunction(ExternalReference function, + int num_reg_arguments, + int num_double_arguments) { + BlockTrampolinePoolScope block_trampoline_pool(this); + li(t6, function); + CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments); +} + +void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, + int num_double_arguments) { + CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); +} + +void TurboAssembler::CallCFunction(ExternalReference function, + int num_arguments) { + CallCFunction(function, num_arguments, 0); +} + +void TurboAssembler::CallCFunction(Register function, int num_arguments) { + CallCFunction(function, num_arguments, 0); +} + +void TurboAssembler::CallCFunctionHelper(Register function, + int num_reg_arguments, + int num_double_arguments) { + DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); + DCHECK(has_frame()); + ASM_CODE_COMMENT(this); + // Make sure that the stack is aligned before calling a C function unless + // running in the simulator. The simulator has its own alignment check which + // provides more information. + // The argument stots are presumed to have been set up by + // PrepareCallCFunction. + +#if V8_HOST_ARCH_RISCV64 + if (FLAG_debug_code) { + int frame_alignment = base::OS::ActivationFrameAlignment(); + int frame_alignment_mask = frame_alignment - 1; + if (frame_alignment > kSystemPointerSize) { + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); + Label alignment_as_expected; + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + And(scratch, sp, Operand(frame_alignment_mask)); + BranchShort(&alignment_as_expected, eq, scratch, Operand(zero_reg)); + } + // Don't use Check here, as it will call Runtime_Abort possibly + // re-entering here. + ebreak(); + bind(&alignment_as_expected); + } + } +#endif // V8_HOST_ARCH_RISCV64 + + // Just call directly. The function called cannot cause a GC, or + // allow preemption, so the return address in the link register + // stays correct. + { + if (function != t6) { + Mv(t6, function); + function = t6; + } + + // Save the frame pointer and PC so that the stack layout remains + // iterable, even without an ExitFrame which normally exists between JS + // and C frames. + // 't' registers are caller-saved so this is safe as a scratch register. + Register pc_scratch = t1; + Register scratch = t2; + + auipc(pc_scratch, 0); + // See x64 code for reasoning about how to address the isolate data fields. 
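+    // The pc/fp pair recorded below is what keeps the stack iterable across
+    // the C call: when the root array is available the slots live at fixed
+    // offsets from kRootRegister, otherwise they are reached through the
+    // isolate's external references (the two branches below).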
+ if (root_array_available()) { + Sd(pc_scratch, MemOperand(kRootRegister, + IsolateData::fast_c_call_caller_pc_offset())); + Sd(fp, MemOperand(kRootRegister, + IsolateData::fast_c_call_caller_fp_offset())); + } else { + DCHECK_NOT_NULL(isolate()); + li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate())); + Sd(pc_scratch, MemOperand(scratch)); + li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); + Sd(fp, MemOperand(scratch)); + } + + Call(function); + + if (isolate() != nullptr) { + // We don't unset the PC; the FP is the source of truth. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); + Sd(zero_reg, MemOperand(scratch)); + } + } + + int stack_passed_arguments = + CalculateStackPassedDWords(num_reg_arguments, num_double_arguments); + + if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) { + Ld(sp, MemOperand(sp, stack_passed_arguments * kSystemPointerSize)); + } else { + Add64(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize)); + } +} + +#undef BRANCH_ARGS_CHECK + +void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, + Condition cc, Label* condition_met) { + And(scratch, object, Operand(~kPageAlignmentMask)); + Ld(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); + And(scratch, scratch, Operand(mask)); + Branch(condition_met, cc, scratch, Operand(zero_reg)); +} + +Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, + Register reg4, Register reg5, + Register reg6) { + RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6}; + + const RegisterConfiguration* config = RegisterConfiguration::Default(); + for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { + int code = config->GetAllocatableGeneralCode(i); + Register candidate = Register::from_code(code); + if (regs.has(candidate)) continue; + return candidate; + } + UNREACHABLE(); +} + +void TurboAssembler::ComputeCodeStartAddress(Register dst) { + // This push on ra and the pop below together ensure that we restore the + // register ra, which is needed while computing the code start address. + push(ra); + + auipc(ra, 0); + addi(ra, ra, kInstrSize * 2); // ra = address of li + int pc = pc_offset(); + li(dst, Operand(pc)); + Sub64(dst, ra, dst); + + pop(ra); // Restore ra +} + +void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, + DeoptimizeKind kind, Label* ret, + Label*) { + ASM_CODE_COMMENT(this); + BlockTrampolinePoolScope block_trampoline_pool(this); + Ld(t6, + MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target))); + Call(t6); + DCHECK_EQ(SizeOfCodeGeneratedSince(exit), + (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize + : Deoptimizer::kEagerDeoptExitSize); +} + +void TurboAssembler::LoadCodeObjectEntry(Register destination, + Register code_object) { + // Code objects are called differently depending on whether we are generating + // builtin code (which will later be embedded into the binary) or compiling + // user JS code at runtime. + // * Builtin code runs in --jitless mode and thus must not call into on-heap + // Code targets. Instead, we dispatch through the builtins entry table. + // * Codegen at runtime does not have this restriction and we can use the + // shorter, branchless instruction sequence. The assumption here is that + // targets are usually generated code and not builtin Code objects. 
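+  // The isolate-independent path below tells the two cases apart at runtime
+  // by testing Code::IsOffHeapTrampoline in the Code object's flags word.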
+ ASM_CODE_COMMENT(this); + if (options().isolate_independent_code) { + DCHECK(root_array_available()); + Label if_code_is_off_heap, out; + + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + + DCHECK(!AreAliased(destination, scratch)); + DCHECK(!AreAliased(code_object, scratch)); + + // Check whether the Code object is an off-heap trampoline. If so, call its + // (off-heap) entry point directly without going through the (on-heap) + // trampoline. Otherwise, just call the Code object as always. + + Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset)); + And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask)); + Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg)); + // Not an off-heap trampoline object, the entry point is at + // Code::raw_instruction_start(). + Add64(destination, code_object, Code::kHeaderSize - kHeapObjectTag); + Branch(&out); + + // An off-heap trampoline, the entry point is loaded from the builtin entry + // table. + bind(&if_code_is_off_heap); + Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset)); + slli(destination, scratch, kSystemPointerSizeLog2); + Add64(destination, destination, kRootRegister); + Ld(destination, + MemOperand(destination, IsolateData::builtin_entry_table_offset())); + + bind(&out); + } else { + Add64(destination, code_object, Code::kHeaderSize - kHeapObjectTag); + } +} + +void TurboAssembler::CallCodeObject(Register code_object) { + ASM_CODE_COMMENT(this); + LoadCodeObjectEntry(code_object, code_object); + Call(code_object); +} + +void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { + ASM_CODE_COMMENT(this); + DCHECK_EQ(JumpMode::kJump, jump_mode); + LoadCodeObjectEntry(code_object, code_object); + Jump(code_object); +} + +void TurboAssembler::LoadTaggedPointerField(const Register& destination, + const MemOperand& field_operand) { + if (COMPRESS_POINTERS_BOOL) { + DecompressTaggedPointer(destination, field_operand); + } else { + Ld(destination, field_operand); + } +} + +void TurboAssembler::LoadAnyTaggedField(const Register& destination, + const MemOperand& field_operand) { + if (COMPRESS_POINTERS_BOOL) { + DecompressAnyTagged(destination, field_operand); + } else { + Ld(destination, field_operand); + } +} + +void TurboAssembler::LoadTaggedSignedField(const Register& destination, + const MemOperand& field_operand) { + if (COMPRESS_POINTERS_BOOL) { + DecompressTaggedSigned(destination, field_operand); + } else { + Ld(destination, field_operand); + } +} + +void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) { + SmiUntag(dst, src); +} + +void TurboAssembler::StoreTaggedField(const Register& value, + const MemOperand& dst_field_operand) { + if (COMPRESS_POINTERS_BOOL) { + Sw(value, dst_field_operand); + } else { + Sd(value, dst_field_operand); + } +} + +void TurboAssembler::DecompressTaggedSigned(const Register& destination, + const MemOperand& field_operand) { + ASM_CODE_COMMENT(this); + Lwu(destination, field_operand); + if (FLAG_debug_code) { + // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits. 
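+    // (kDebugZapValue is a debug-only zap pattern, so a decompressed Smi that
+    // is accidentally used as a full 64-bit pointer stands out immediately.)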
+ Add64(destination, destination, + Operand(((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32)); + } +} + +void TurboAssembler::DecompressTaggedPointer(const Register& destination, + const MemOperand& field_operand) { + ASM_CODE_COMMENT(this); + Lwu(destination, field_operand); + Add64(destination, kPtrComprCageBaseRegister, destination); +} + +void TurboAssembler::DecompressTaggedPointer(const Register& destination, + const Register& source) { + ASM_CODE_COMMENT(this); + And(destination, source, Operand(0xFFFFFFFF)); + Add64(destination, kPtrComprCageBaseRegister, Operand(destination)); +} + +void TurboAssembler::DecompressAnyTagged(const Register& destination, + const MemOperand& field_operand) { + ASM_CODE_COMMENT(this); + Lwu(destination, field_operand); + Add64(destination, kPtrComprCageBaseRegister, destination); +} + +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, + ArgumentsCountMode mode, Register scratch) { + switch (type) { + case kCountIsInteger: { + CalcScaledAddress(sp, sp, count, kPointerSizeLog2); + break; + } + case kCountIsSmi: { + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + DCHECK_NE(scratch, no_reg); + SmiScale(scratch, count, kPointerSizeLog2); + Add64(sp, sp, scratch); + break; + } + case kCountIsBytes: { + Add64(sp, sp, count); + break; + } + } + if (mode == kCountExcludesReceiver) { + Add64(sp, sp, kSystemPointerSize); + } +} + +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, + Register receiver, + ArgumentsCountType type, + ArgumentsCountMode mode, + Register scratch) { + DCHECK(!AreAliased(argc, receiver)); + if (mode == kCountExcludesReceiver) { + // Drop arguments without receiver and override old receiver. + DropArguments(argc, type, kCountIncludesReceiver, scratch); + Sd(receiver, MemOperand(sp)); + } else { + DropArguments(argc, type, mode, scratch); + push(receiver); + } +} + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_RISCV64 diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h new file mode 100644 index 00000000000000..cb738a26dc951f --- /dev/null +++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h @@ -0,0 +1,1363 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H +#error This header must be included via macro-assembler.h +#endif + +#ifndef V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_ +#define V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_ + +#include "src/codegen/assembler.h" +#include "src/codegen/riscv64/assembler-riscv64.h" +#include "src/common/globals.h" +#include "src/execution/isolate-data.h" +#include "src/objects/tagged-index.h" + +namespace v8 { +namespace internal { + +// Forward declarations. +enum class AbortReason : uint8_t; + +// Reserved Register Usage Summary. +// +// Registers t5, t6, and t3 are reserved for use by the MacroAssembler. +// +// The programmer should know that the MacroAssembler may clobber these three, +// but won't touch other registers except in special cases. +// +// TODO(RISCV): Cannot find info about this ABI. We chose t6 for now. +// Per the RISC-V ABI, register t6 must be used for indirect function call +// via 'jalr t6' or 'jr t6' instructions. This is relied upon by gcc when +// trying to update gp register for position-independent-code. 
Whenever +// RISC-V generated code calls C code, it must be via t6 register. + +// Flags used for LeaveExitFrame function. +enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false }; + +// Flags used for the li macro-assembler function. +enum LiFlags { + // If the constant value can be represented in just 16 bits, then + // optimize the li to use a single instruction, rather than lui/ori/slli + // sequence. A number of other optimizations that emits less than + // maximum number of instructions exists. + OPTIMIZE_SIZE = 0, + // Always use 8 instructions (lui/addi/slliw sequence), even if the + // constant + // could be loaded with just one, so that this value is patchable later. + CONSTANT_SIZE = 1, + // For address loads 8 instruction are required. Used to mark + // constant load that will be used as address without relocation + // information. It ensures predictable code size, so specific sites + // in code are patchable. + ADDRESS_LOAD = 2 +}; + +enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; + +Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg, + Register reg3 = no_reg, + Register reg4 = no_reg, + Register reg5 = no_reg, + Register reg6 = no_reg); + +// ----------------------------------------------------------------------------- +// Static helper functions. + +#if defined(V8_TARGET_LITTLE_ENDIAN) +#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2) +#else +#define SmiWordOffset(offset) offset +#endif + +// Generate a MemOperand for loading a field from an object. +inline MemOperand FieldMemOperand(Register object, int offset) { + return MemOperand(object, offset - kHeapObjectTag); +} + +// Generate a MemOperand for storing arguments 5..N on the stack +// when calling CallCFunction(). +// TODO(plind): Currently ONLY used for O32. Should be fixed for +// n64, and used in RegExp code, and other places +// with more than 8 arguments. +inline MemOperand CFunctionArgumentOperand(int index) { + DCHECK_GT(index, kCArgSlotCount); + // Argument 5 takes the slot just past the four Arg-slots. + int offset = (index - 5) * kSystemPointerSize + kCArgsSlotsSize; + return MemOperand(sp, offset); +} + +class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { + public: + using TurboAssemblerBase::TurboAssemblerBase; + + // Activation support. + void EnterFrame(StackFrame::Type type); + void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { + // Out-of-line constant pool not implemented on RISC-V. + UNREACHABLE(); + } + void LeaveFrame(StackFrame::Type type); + + // Generates function and stub prologue code. + void StubPrologue(StackFrame::Type type); + void Prologue(); + + void InitializeRootRegister() { + ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); + li(kRootRegister, Operand(isolate_root)); +#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE + LoadRootRelative(kPtrComprCageBaseRegister, + IsolateData::cage_base_offset()); +#endif + } + + // Jump unconditionally to given label. + void jmp(Label* L) { Branch(L); } + + // ------------------------------------------------------------------------- + // Debugging. + + void Trap(); + void DebugBreak(); + + // Calls Abort(msg) if the condition cc is not satisfied. + // Use --debug_code to enable. + void Assert(Condition cc, AbortReason reason, Register rs, Operand rt); + + // Like Assert(), but always enabled. + void Check(Condition cc, AbortReason reason, Register rs, Operand rt); + + // Print a message to stdout and abort execution. 
+ void Abort(AbortReason msg); + + // Arguments macros. +#define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2 +#define COND_ARGS cond, r1, r2 + + // Cases when relocation is not needed. +#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \ + void Name(target_type target); \ + void Name(target_type target, COND_TYPED_ARGS); + +#define DECLARE_BRANCH_PROTOTYPES(Name) \ + DECLARE_NORELOC_PROTOTYPE(Name, Label*) \ + DECLARE_NORELOC_PROTOTYPE(Name, int32_t) + + DECLARE_BRANCH_PROTOTYPES(BranchAndLink) + DECLARE_BRANCH_PROTOTYPES(BranchShort) + + void Branch(Label* target); + void Branch(int32_t target); + void BranchLong(Label* L); + void Branch(Label* target, Condition cond, Register r1, const Operand& r2, + Label::Distance near_jump = Label::kFar); + void Branch(int32_t target, Condition cond, Register r1, const Operand& r2, + Label::Distance near_jump = Label::kFar); +#undef DECLARE_BRANCH_PROTOTYPES +#undef COND_TYPED_ARGS +#undef COND_ARGS + + void AllocateStackSpace(Register bytes) { Sub64(sp, sp, bytes); } + + void AllocateStackSpace(int bytes) { + DCHECK_GE(bytes, 0); + if (bytes == 0) return; + Sub64(sp, sp, Operand(bytes)); + } + + inline void NegateBool(Register rd, Register rs) { Xor(rd, rs, 1); } + + // Compare float, if any operand is NaN, result is false except for NE + void CompareF32(Register rd, FPUCondition cc, FPURegister cmp1, + FPURegister cmp2); + // Compare double, if any operand is NaN, result is false except for NE + void CompareF64(Register rd, FPUCondition cc, FPURegister cmp1, + FPURegister cmp2); + void CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2); + void CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2); + void CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2); + void CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2); + + // Floating point branches + void BranchTrueShortF(Register rs, Label* target); + void BranchFalseShortF(Register rs, Label* target); + + void BranchTrueF(Register rs, Label* target); + void BranchFalseF(Register rs, Label* target); + + void Branch(Label* L, Condition cond, Register rs, RootIndex index); + + static int InstrCountForLi64Bit(int64_t value); + inline void LiLower32BitHelper(Register rd, Operand j); + void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE); + // Load int32 in the rd register. + void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE); + inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) { + li(rd, Operand(j), mode); + } + + inline void Move(Register output, MemOperand operand) { Ld(output, operand); } + void li(Register dst, Handle value, + RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT); + void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE); + void li(Register dst, const StringConstantBase* string, + LiFlags mode = OPTIMIZE_SIZE); + + void LoadFromConstantsTable(Register destination, int constant_index) final; + void LoadRootRegisterOffset(Register destination, intptr_t offset) final; + void LoadRootRelative(Register destination, int32_t offset) final; + + inline void GenPCRelativeJump(Register rd, int64_t imm32) { + DCHECK(is_int32(imm32 + 0x800)); + int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12); + int32_t Lo12 = (int32_t)imm32 << 20 >> 20; + auipc(rd, Hi20); // Read PC + Hi20 into scratch. 
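+    // auipc adds (Hi20 << 12) to the pc; the +0x800 rounding above cancels the
+    // sign extension of the 12-bit Lo12 consumed by the following jr, so the
+    // pair together reproduces the original 32-bit pc-relative offset.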
+ jr(rd, Lo12); // jump PC + Hi20 + Lo12 + } + + inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32) { + DCHECK(is_int32(imm32 + 0x800)); + int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12); + int32_t Lo12 = (int32_t)imm32 << 20 >> 20; + auipc(rd, Hi20); // Read PC + Hi20 into scratch. + jalr(rd, Lo12); // jump PC + Hi20 + Lo12 + } +// Jump, Call, and Ret pseudo instructions implementing inter-working. +#define COND_ARGS \ + Condition cond = al, Register rs = zero_reg, \ + const Operand &rt = Operand(zero_reg) + + void Jump(Register target, COND_ARGS); + void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS); + void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS); + // Deffer from li, this method save target to the memory, and then load + // it to register use ld, it can be used in wasm jump table for concurrent + // patching. + void PatchAndJump(Address target); + void Jump(Handle code, RelocInfo::Mode rmode, COND_ARGS); + void Jump(const ExternalReference& reference); + void Call(Register target, COND_ARGS); + void Call(Address target, RelocInfo::Mode rmode, COND_ARGS); + void Call(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, + COND_ARGS); + void Call(Label* target); + void LoadAddress( + Register dst, Label* target, + RelocInfo::Mode rmode = RelocInfo::INTERNAL_REFERENCE_ENCODED); + + // Load the builtin given by the Smi in |builtin| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin); + void LoadEntryFromBuiltin(Builtin builtin, Register destination); + MemOperand EntryFromBuiltinAsOperand(Builtin builtin); + void CallBuiltinByIndex(Register builtin); + void CallBuiltin(Builtin builtin); + void TailCallBuiltin(Builtin builtin); + + void LoadCodeObjectEntry(Register destination, Register code_object); + void CallCodeObject(Register code_object); + void JumpCodeObject(Register code_object, + JumpMode jump_mode = JumpMode::kJump); + + // Generates an instruction sequence s.t. the return address points to the + // instruction following the call. + // The return address on the stack is used by frame iteration. + void StoreReturnAddressAndCall(Register target); + + void CallForDeoptimization(Builtin target, int deopt_id, Label* exit, + DeoptimizeKind kind, Label* ret, + Label* jump_deoptimization_entry_label); + + void Ret(COND_ARGS); + + // Emit code to discard a non-negative number of pointer-sized elements + // from the stack, clobbering only the sp register. + void Drop(int count, Condition cond = cc_always, Register reg = no_reg, + const Operand& op = Operand(no_reg)); + + // Trivial case of DropAndRet that only emits 2 instructions. + void DropAndRet(int drop); + + void DropAndRet(int drop, Condition cond, Register reg, const Operand& op); + + void Ld(Register rd, const MemOperand& rs); + void Sd(Register rd, const MemOperand& rs); + + void push(Register src) { + Add64(sp, sp, Operand(-kSystemPointerSize)); + Sd(src, MemOperand(sp, 0)); + } + void Push(Register src) { push(src); } + void Push(Handle handle); + void Push(Smi smi); + + // Push two registers. Pushes leftmost register first (to highest address). + void Push(Register src1, Register src2) { + Sub64(sp, sp, Operand(2 * kSystemPointerSize)); + Sd(src1, MemOperand(sp, 1 * kSystemPointerSize)); + Sd(src2, MemOperand(sp, 0 * kSystemPointerSize)); + } + + // Push three registers. Pushes leftmost register first (to highest address). 
+ void Push(Register src1, Register src2, Register src3) { + Sub64(sp, sp, Operand(3 * kSystemPointerSize)); + Sd(src1, MemOperand(sp, 2 * kSystemPointerSize)); + Sd(src2, MemOperand(sp, 1 * kSystemPointerSize)); + Sd(src3, MemOperand(sp, 0 * kSystemPointerSize)); + } + + // Push four registers. Pushes leftmost register first (to highest address). + void Push(Register src1, Register src2, Register src3, Register src4) { + Sub64(sp, sp, Operand(4 * kSystemPointerSize)); + Sd(src1, MemOperand(sp, 3 * kSystemPointerSize)); + Sd(src2, MemOperand(sp, 2 * kSystemPointerSize)); + Sd(src3, MemOperand(sp, 1 * kSystemPointerSize)); + Sd(src4, MemOperand(sp, 0 * kSystemPointerSize)); + } + + // Push five registers. Pushes leftmost register first (to highest address). + void Push(Register src1, Register src2, Register src3, Register src4, + Register src5) { + Sub64(sp, sp, Operand(5 * kSystemPointerSize)); + Sd(src1, MemOperand(sp, 4 * kSystemPointerSize)); + Sd(src2, MemOperand(sp, 3 * kSystemPointerSize)); + Sd(src3, MemOperand(sp, 2 * kSystemPointerSize)); + Sd(src4, MemOperand(sp, 1 * kSystemPointerSize)); + Sd(src5, MemOperand(sp, 0 * kSystemPointerSize)); + } + + void Push(Register src, Condition cond, Register tst1, Register tst2) { + // Since we don't have conditional execution we use a Branch. + Branch(3, cond, tst1, Operand(tst2)); + Sub64(sp, sp, Operand(kSystemPointerSize)); + Sd(src, MemOperand(sp, 0)); + } + + enum PushArrayOrder { kNormal, kReverse }; + void PushArray(Register array, Register size, PushArrayOrder order = kNormal); + + void MaybeSaveRegisters(RegList registers); + void MaybeRestoreRegisters(RegList registers); + + void CallEphemeronKeyBarrier(Register object, Register slot_address, + SaveFPRegsMode fp_mode); + + void CallRecordWriteStubSaveRegisters( + Register object, Register slot_address, + RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, + StubCallMode mode = StubCallMode::kCallBuiltinPointer); + void CallRecordWriteStub( + Register object, Register slot_address, + RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, + StubCallMode mode = StubCallMode::kCallBuiltinPointer); + + // Push multiple registers on the stack. + // Registers are saved in numerical order, with higher numbered registers + // saved in higher memory addresses. + void MultiPush(RegList regs); + void MultiPushFPU(DoubleRegList regs); + + // Calculate how much stack space (in bytes) are required to store caller + // registers excluding those specified in the arguments. + int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, + Register exclusion1 = no_reg, + Register exclusion2 = no_reg, + Register exclusion3 = no_reg) const; + + // Push caller saved registers on the stack, and return the number of bytes + // stack pointer is adjusted. + int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg, + Register exclusion2 = no_reg, + Register exclusion3 = no_reg); + // Restore caller saved registers from the stack, and return the number of + // bytes stack pointer is adjusted. + int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg, + Register exclusion2 = no_reg, + Register exclusion3 = no_reg); + + void pop(Register dst) { + Ld(dst, MemOperand(sp, 0)); + Add64(sp, sp, Operand(kSystemPointerSize)); + } + void Pop(Register dst) { pop(dst); } + + // Pop two registers. Pops rightmost register first (from lower address). 
+ void Pop(Register src1, Register src2) { + DCHECK(src1 != src2); + Ld(src2, MemOperand(sp, 0 * kSystemPointerSize)); + Ld(src1, MemOperand(sp, 1 * kSystemPointerSize)); + Add64(sp, sp, 2 * kSystemPointerSize); + } + + // Pop three registers. Pops rightmost register first (from lower address). + void Pop(Register src1, Register src2, Register src3) { + Ld(src3, MemOperand(sp, 0 * kSystemPointerSize)); + Ld(src2, MemOperand(sp, 1 * kSystemPointerSize)); + Ld(src1, MemOperand(sp, 2 * kSystemPointerSize)); + Add64(sp, sp, 3 * kSystemPointerSize); + } + + void Pop(uint32_t count = 1) { + Add64(sp, sp, Operand(count * kSystemPointerSize)); + } + + // Pops multiple values from the stack and load them in the + // registers specified in regs. Pop order is the opposite as in MultiPush. + void MultiPop(RegList regs); + void MultiPopFPU(DoubleRegList regs); + +#define DEFINE_INSTRUCTION(instr) \ + void instr(Register rd, Register rs, const Operand& rt); \ + void instr(Register rd, Register rs, Register rt) { \ + instr(rd, rs, Operand(rt)); \ + } \ + void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); } + +#define DEFINE_INSTRUCTION2(instr) \ + void instr(Register rs, const Operand& rt); \ + void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \ + void instr(Register rs, int32_t j) { instr(rs, Operand(j)); } + +#define DEFINE_INSTRUCTION3(instr) void instr(Register rd, int64_t imm); + + DEFINE_INSTRUCTION(Add32) + DEFINE_INSTRUCTION(Add64) + DEFINE_INSTRUCTION(Div32) + DEFINE_INSTRUCTION(Divu32) + DEFINE_INSTRUCTION(Divu64) + DEFINE_INSTRUCTION(Mod32) + DEFINE_INSTRUCTION(Modu32) + DEFINE_INSTRUCTION(Div64) + DEFINE_INSTRUCTION(Sub32) + DEFINE_INSTRUCTION(Sub64) + DEFINE_INSTRUCTION(Mod64) + DEFINE_INSTRUCTION(Modu64) + DEFINE_INSTRUCTION(Mul32) + DEFINE_INSTRUCTION(Mulh32) + DEFINE_INSTRUCTION(Mul64) + DEFINE_INSTRUCTION(Mulh64) + DEFINE_INSTRUCTION2(Div32) + DEFINE_INSTRUCTION2(Div64) + DEFINE_INSTRUCTION2(Divu32) + DEFINE_INSTRUCTION2(Divu64) + + DEFINE_INSTRUCTION(And) + DEFINE_INSTRUCTION(Or) + DEFINE_INSTRUCTION(Xor) + DEFINE_INSTRUCTION(Nor) + DEFINE_INSTRUCTION2(Neg) + + DEFINE_INSTRUCTION(Slt) + DEFINE_INSTRUCTION(Sltu) + DEFINE_INSTRUCTION(Sle) + DEFINE_INSTRUCTION(Sleu) + DEFINE_INSTRUCTION(Sgt) + DEFINE_INSTRUCTION(Sgtu) + DEFINE_INSTRUCTION(Sge) + DEFINE_INSTRUCTION(Sgeu) + DEFINE_INSTRUCTION(Seq) + DEFINE_INSTRUCTION(Sne) + + DEFINE_INSTRUCTION(Sll64) + DEFINE_INSTRUCTION(Sra64) + DEFINE_INSTRUCTION(Srl64) + DEFINE_INSTRUCTION(Sll32) + DEFINE_INSTRUCTION(Sra32) + DEFINE_INSTRUCTION(Srl32) + + DEFINE_INSTRUCTION2(Seqz) + DEFINE_INSTRUCTION2(Snez) + + DEFINE_INSTRUCTION(Ror) + DEFINE_INSTRUCTION(Dror) + + DEFINE_INSTRUCTION3(Li) + DEFINE_INSTRUCTION2(Mv) + +#undef DEFINE_INSTRUCTION +#undef DEFINE_INSTRUCTION2 +#undef DEFINE_INSTRUCTION3 + + void SmiUntag(Register dst, const MemOperand& src); + void SmiUntag(Register dst, Register src) { + DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); + if (COMPRESS_POINTERS_BOOL) { + sraiw(dst, src, kSmiShift); + } else { + srai(dst, src, kSmiShift); + } + } + + void SmiUntag(Register reg) { SmiUntag(reg, reg); } + void SmiToInt32(Register smi); + + // Enabled via --debug-code. + void AssertNotSmi(Register object, + AbortReason reason = AbortReason::kOperandIsASmi); + void AssertSmi(Register object, + AbortReason reason = AbortReason::kOperandIsASmi); + + int CalculateStackPassedDWords(int num_gp_arguments, int num_fp_arguments); + + // Before calling a C-function from generated code, align arguments on stack. 
+ // After aligning the frame, non-register arguments must be stored on the + // stack, using helper: CFunctionArgumentOperand(). + // The argument count assumes all arguments are word sized. + // Some compilers/platforms require the stack to be aligned when calling + // C++ code. + // Needs a scratch register to do some arithmetic. This register will be + // trashed. + void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, + Register scratch); + void PrepareCallCFunction(int num_reg_arguments, Register scratch); + + // Arguments 1-8 are placed in registers a0 through a7 respectively. + // Arguments 9..n are stored to stack + + // Calls a C function and cleans up the space for arguments allocated + // by PrepareCallCFunction. The called function is not allowed to trigger a + // garbage collection, since that might move the code and invalidate the + // return address (unless this is somehow accounted for by the called + // function). + void CallCFunction(ExternalReference function, int num_arguments); + void CallCFunction(Register function, int num_arguments); + void CallCFunction(ExternalReference function, int num_reg_arguments, + int num_double_arguments); + void CallCFunction(Register function, int num_reg_arguments, + int num_double_arguments); + void MovFromFloatResult(DoubleRegister dst); + void MovFromFloatParameter(DoubleRegister dst); + + // These functions abstract parameter passing for the three different ways + // we call C functions from generated code. + void MovToFloatParameter(DoubleRegister src); + void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2); + void MovToFloatResult(DoubleRegister src); + + // See comments at the beginning of Builtins::Generate_CEntry. + inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); } + inline void PrepareCEntryFunction(const ExternalReference& ref) { + li(a1, ref); + } + + void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, + Label* condition_met); +#undef COND_ARGS + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. + // Exits with 'result' holding the answer. 
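+  // Per ToInt32, NaN, +-0 and +-Infinity convert to 0, while other values are
+  // truncated and reduced modulo 2^32 into the signed 32-bit range.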
+ void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, + DoubleRegister double_input, StubCallMode stub_mode); + + void CompareI(Register rd, Register rs, const Operand& rt, Condition cond); + + void LoadZeroIfConditionNotZero(Register dest, Register condition); + void LoadZeroIfConditionZero(Register dest, Register condition); + + void SignExtendByte(Register rd, Register rs) { + slli(rd, rs, 64 - 8); + srai(rd, rd, 64 - 8); + } + + void SignExtendShort(Register rd, Register rs) { + slli(rd, rs, 64 - 16); + srai(rd, rd, 64 - 16); + } + + void SignExtendWord(Register rd, Register rs) { sext_w(rd, rs); } + void ZeroExtendWord(Register rd, Register rs) { + slli(rd, rs, 32); + srli(rd, rd, 32); + } + + void Clz32(Register rd, Register rs); + void Clz64(Register rd, Register rs); + void Ctz32(Register rd, Register rs); + void Ctz64(Register rd, Register rs); + void Popcnt32(Register rd, Register rs, Register scratch); + void Popcnt64(Register rd, Register rs, Register scratch); + + // Bit field starts at bit pos and extending for size bits is extracted from + // rs and stored zero/sign-extended and right-justified in rt + void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size, + bool sign_extend = false); + void ExtractBits(Register dest, Register source, Register pos, int size, + bool sign_extend = false) { + sra(dest, source, pos); + ExtractBits(dest, dest, 0, size, sign_extend); + } + + // Insert bits [0, size) of source to bits [pos, pos+size) of dest + void InsertBits(Register dest, Register source, Register pos, int size); + + void Neg_s(FPURegister fd, FPURegister fs); + void Neg_d(FPURegister fd, FPURegister fs); + + // Change endianness + void ByteSwap(Register dest, Register src, int operand_size, + Register scratch); + + void Clear_if_nan_d(Register rd, FPURegister fs); + void Clear_if_nan_s(Register rd, FPURegister fs); + // Convert single to unsigned word. 
+ void Trunc_uw_s(Register rd, FPURegister fs, Register result = no_reg); + + // helper functions for unaligned load/store + template + void UnalignedLoadHelper(Register rd, const MemOperand& rs); + template + void UnalignedStoreHelper(Register rd, const MemOperand& rs, + Register scratch_other = no_reg); + + template + void UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, + Register scratch); + template + void UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs, + Register scratch); + + template + void AlignedLoadHelper(Reg_T target, const MemOperand& rs, Func generator); + template + void AlignedStoreHelper(Reg_T value, const MemOperand& rs, Func generator); + + template + void LoadNBytes(Register rd, const MemOperand& rs, Register scratch); + template + void LoadNBytesOverwritingBaseReg(const MemOperand& rs, Register scratch0, + Register scratch1); + // load/store macros + void Ulh(Register rd, const MemOperand& rs); + void Ulhu(Register rd, const MemOperand& rs); + void Ush(Register rd, const MemOperand& rs); + + void Ulw(Register rd, const MemOperand& rs); + void Ulwu(Register rd, const MemOperand& rs); + void Usw(Register rd, const MemOperand& rs); + + void Uld(Register rd, const MemOperand& rs); + void Usd(Register rd, const MemOperand& rs); + + void ULoadFloat(FPURegister fd, const MemOperand& rs, Register scratch); + void UStoreFloat(FPURegister fd, const MemOperand& rs, Register scratch); + + void ULoadDouble(FPURegister fd, const MemOperand& rs, Register scratch); + void UStoreDouble(FPURegister fd, const MemOperand& rs, Register scratch); + + void Lb(Register rd, const MemOperand& rs); + void Lbu(Register rd, const MemOperand& rs); + void Sb(Register rd, const MemOperand& rs); + + void Lh(Register rd, const MemOperand& rs); + void Lhu(Register rd, const MemOperand& rs); + void Sh(Register rd, const MemOperand& rs); + + void Lw(Register rd, const MemOperand& rs); + void Lwu(Register rd, const MemOperand& rs); + void Sw(Register rd, const MemOperand& rs); + + void LoadFloat(FPURegister fd, const MemOperand& src); + void StoreFloat(FPURegister fs, const MemOperand& dst); + + void LoadDouble(FPURegister fd, const MemOperand& src); + void StoreDouble(FPURegister fs, const MemOperand& dst); + + void Ll(Register rd, const MemOperand& rs); + void Sc(Register rd, const MemOperand& rs); + + void Lld(Register rd, const MemOperand& rs); + void Scd(Register rd, const MemOperand& rs); + + void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2); + void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2); + void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2); + void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2); + template + void FloatMinMaxHelper(FPURegister dst, FPURegister src1, FPURegister src2, + MaxMinKind kind); + + bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; } + bool IsSingleZeroRegSet() { return has_single_zero_reg_set_; } + + inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); } + + inline void Move(Register dst, Register src) { + if (dst != src) { + mv(dst, src); + } + } + + inline void MoveDouble(FPURegister dst, FPURegister src) { + if (dst != src) fmv_d(dst, src); + } + + inline void MoveFloat(FPURegister dst, FPURegister src) { + if (dst != src) fmv_s(dst, src); + } + + inline void Move(FPURegister dst, FPURegister src) { MoveDouble(dst, src); } + + inline void Move(Register dst_low, Register dst_high, FPURegister src) { + fmv_x_d(dst_high, src); + fmv_x_w(dst_low, src); 
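+    // fmv_x_d copied the full 64-bit pattern into dst_high and fmv_x_w the low
+    // 32 bits into dst_low; the shift below keeps only the upper word, so the
+    // pair (dst_low, dst_high) holds the two halves of the double.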
+ srli(dst_high, dst_high, 32); + } + + inline void Move(Register dst, FPURegister src) { fmv_x_d(dst, src); } + + inline void Move(FPURegister dst, Register src) { fmv_d_x(dst, src); } + + // Extract sign-extended word from high-half of FPR to GPR + inline void ExtractHighWordFromF64(Register dst_high, FPURegister src) { + fmv_x_d(dst_high, src); + srai(dst_high, dst_high, 32); + } + + // Insert low-word from GPR (src_high) to the high-half of FPR (dst) + void InsertHighWordF64(FPURegister dst, Register src_high); + + // Extract sign-extended word from low-half of FPR to GPR + inline void ExtractLowWordFromF64(Register dst_low, FPURegister src) { + fmv_x_w(dst_low, src); + } + + // Insert low-word from GPR (src_high) to the low-half of FPR (dst) + void InsertLowWordF64(FPURegister dst, Register src_low); + + void LoadFPRImmediate(FPURegister dst, float imm) { + LoadFPRImmediate(dst, bit_cast(imm)); + } + void LoadFPRImmediate(FPURegister dst, double imm) { + LoadFPRImmediate(dst, bit_cast(imm)); + } + void LoadFPRImmediate(FPURegister dst, uint32_t src); + void LoadFPRImmediate(FPURegister dst, uint64_t src); + + // AddOverflow64 sets overflow register to a negative value if + // overflow occured, otherwise it is zero or positive + void AddOverflow64(Register dst, Register left, const Operand& right, + Register overflow); + // SubOverflow64 sets overflow register to a negative value if + // overflow occured, otherwise it is zero or positive + void SubOverflow64(Register dst, Register left, const Operand& right, + Register overflow); + // MulOverflow32 sets overflow register to zero if no overflow occured + void MulOverflow32(Register dst, Register left, const Operand& right, + Register overflow); + + // MIPS-style 32-bit unsigned mulh + void Mulhu32(Register dst, Register left, const Operand& right, + Register left_zero, Register right_zero); + + // Number of instructions needed for calculation of switch table entry address + static const int kSwitchTablePrologueSize = 6; + + // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a + // functor/function with 'Label *func(size_t index)' declaration. + template + void GenerateSwitchTable(Register index, size_t case_count, + Func GetLabelFunction); + + // Load an object from the root table. + void LoadRoot(Register destination, RootIndex index) final; + void LoadRoot(Register destination, RootIndex index, Condition cond, + Register src1, const Operand& src2); + + void LoadMap(Register destination, Register object); + + // If the value is a NaN, canonicalize the value else, do nothing. + void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src); + + // --------------------------------------------------------------------------- + // FPU macros. These do not handle special cases like NaN or +- inf. + + // Convert unsigned word to double. + void Cvt_d_uw(FPURegister fd, Register rs); + + // convert signed word to double. + void Cvt_d_w(FPURegister fd, Register rs); + + // Convert unsigned long to double. + void Cvt_d_ul(FPURegister fd, Register rs); + + // Convert unsigned word to float. + void Cvt_s_uw(FPURegister fd, Register rs); + + // convert signed word to float. + void Cvt_s_w(FPURegister fd, Register rs); + + // Convert unsigned long to float. + void Cvt_s_ul(FPURegister fd, Register rs); + + // Convert double to unsigned word. + void Trunc_uw_d(Register rd, FPURegister fs, Register result = no_reg); + + // Convert double to signed word. 
+ void Trunc_w_d(Register rd, FPURegister fs, Register result = no_reg); + + // Convert single to signed word. + void Trunc_w_s(Register rd, FPURegister fs, Register result = no_reg); + + // Convert double to unsigned long. + void Trunc_ul_d(Register rd, FPURegister fs, Register result = no_reg); + + // Convert singled to signed long. + void Trunc_l_d(Register rd, FPURegister fs, Register result = no_reg); + + // Convert single to unsigned long. + void Trunc_ul_s(Register rd, FPURegister fs, Register result = no_reg); + + // Convert singled to signed long. + void Trunc_l_s(Register rd, FPURegister fs, Register result = no_reg); + + // Round single to signed word. + void Round_w_s(Register rd, FPURegister fs, Register result = no_reg); + + // Round double to signed word. + void Round_w_d(Register rd, FPURegister fs, Register result = no_reg); + + // Ceil single to signed word. + void Ceil_w_s(Register rd, FPURegister fs, Register result = no_reg); + + // Ceil double to signed word. + void Ceil_w_d(Register rd, FPURegister fs, Register result = no_reg); + + // Floor single to signed word. + void Floor_w_s(Register rd, FPURegister fs, Register result = no_reg); + + // Floor double to signed word. + void Floor_w_d(Register rd, FPURegister fs, Register result = no_reg); + + // Round double functions + void Trunc_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); + void Round_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); + void Floor_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); + void Ceil_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); + + // Round float functions + void Trunc_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); + void Round_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); + void Floor_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); + void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch); + + void Ceil_f(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch); + + void Ceil_d(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch); + + void Floor_f(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch); + void Floor_d(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch); + void Trunc_f(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch); + void Trunc_d(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch); + void Round_f(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch); + void Round_d(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch); + + // ------------------------------------------------------------------------- + // Smi utilities. + + void SmiTag(Register dst, Register src) { + STATIC_ASSERT(kSmiTag == 0); + if (SmiValuesAre32Bits()) { + // Smi goes to upper 32 + slli(dst, src, 32); + } else { + DCHECK(SmiValuesAre31Bits()); + // Smi is shifted left by 1 + Add32(dst, src, src); + } + } + + void SmiTag(Register reg) { SmiTag(reg, reg); } + + // Jump the register contains a smi. + void JumpIfSmi(Register value, Label* smi_label); + + void JumpIfEqual(Register a, int32_t b, Label* dest) { + Branch(dest, eq, a, Operand(b)); + } + + void JumpIfLessThan(Register a, int32_t b, Label* dest) { + Branch(dest, lt, a, Operand(b)); + } + + // Push a standard frame, consisting of ra, fp, context and JS function. + void PushStandardFrame(Register function_reg); + + // Get the actual activation frame alignment for target environment. 
+ static int ActivationFrameAlignment(); + + // Calculated scaled address (rd) as rt + rs << sa + void CalcScaledAddress(Register rd, Register rs, Register rt, uint8_t sa); + + // Compute the start of the generated instruction stream from the current PC. + // This is an alternative to embedding the {CodeObject} handle as a reference. + void ComputeCodeStartAddress(Register dst); + + // Control-flow integrity: + + // Define a function entrypoint. This doesn't emit any code for this + // architecture, as control-flow integrity is not supported for it. + void CodeEntry() {} + // Define an exception handler. + void ExceptionHandler() {} + // Define an exception handler and bind a label. + void BindExceptionHandler(Label* label) { bind(label); } + + // --------------------------------------------------------------------------- + // Pointer compression Support + + // Loads a field containing a HeapObject and decompresses it if pointer + // compression is enabled. + void LoadTaggedPointerField(const Register& destination, + const MemOperand& field_operand); + + // Loads a field containing any tagged value and decompresses it if necessary. + void LoadAnyTaggedField(const Register& destination, + const MemOperand& field_operand); + + // Loads a field containing a tagged signed value and decompresses it if + // necessary. + void LoadTaggedSignedField(const Register& destination, + const MemOperand& field_operand); + + // Loads a field containing smi value and untags it. + void SmiUntagField(Register dst, const MemOperand& src); + + // Compresses and stores tagged value to given on-heap location. + void StoreTaggedField(const Register& value, + const MemOperand& dst_field_operand); + + void DecompressTaggedSigned(const Register& destination, + const MemOperand& field_operand); + void DecompressTaggedPointer(const Register& destination, + const MemOperand& field_operand); + void DecompressTaggedPointer(const Register& destination, + const Register& source); + void DecompressAnyTagged(const Register& destination, + const MemOperand& field_operand); + void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) { + if (COMPRESS_POINTERS_BOOL) { + Sub32(rd, rs1, rs2); + } else { + Sub64(rd, rs1, rs2); + } + } + // Wasm into RVV + void WasmRvvExtractLane(Register dst, VRegister src, int8_t idx, VSew sew, + Vlmul lmul) { + VU.set(kScratchReg, sew, lmul); + VRegister Vsrc = idx != 0 ? 
kSimd128ScratchReg : src; + if (idx != 0) { + vslidedown_vi(kSimd128ScratchReg, src, idx); + } + vmv_xs(dst, Vsrc); + } + + void WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, + Vlmul lmul); + + void WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, + Vlmul lmul); + void WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, + Vlmul lmul); + void WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, + Vlmul lmul); + void WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, + Vlmul lmul); + void WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, + Vlmul lmul); + void WasmRvvS128const(VRegister dst, const uint8_t imms[16]); + + void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src); + void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst); + + protected: + inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch); + inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits); + + private: + bool has_double_zero_reg_set_ = false; + bool has_single_zero_reg_set_ = false; + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it + // succeeds, otherwise falls through if result is saturated. On return + // 'result' either holds answer, or is clobbered on fall through. + void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, + Label* done); + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); + + // TODO(RISCV) Reorder parameters so out parameters come last. + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt); + + void BranchShortHelper(int32_t offset, Label* L); + bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs, + const Operand& rt); + bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, + const Operand& rt); + + void BranchAndLinkShortHelper(int32_t offset, Label* L); + void BranchAndLinkShort(int32_t offset); + void BranchAndLinkShort(Label* L); + bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + void BranchAndLinkLong(Label* L); + + template + void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch, + RoundingMode mode); + + template + void RoundHelper(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch, RoundingMode frm); + + template + void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result, + TruncFunc trunc); + + // Push a fixed frame, consisting of ra, fp. + void PushCommonFrame(Register marker_reg = no_reg); +}; + +// MacroAssembler implements a collection of frequently used macros. +class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { + public: + using TurboAssembler::TurboAssembler; + + // It assumes that the arguments are located below the stack pointer. + // argc is the number of arguments not including the receiver. + // TODO(victorgomes): Remove this function once we stick with the reversed + // arguments order. 
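+  // Until then the receiver is simply the value at sp; the argc (and scratch)
+  // parameters of LoadReceiver/StoreReceiver below are currently unused.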
+ void LoadReceiver(Register dest, Register argc) { + Ld(dest, MemOperand(sp, 0)); + } + + void StoreReceiver(Register rec, Register argc, Register scratch) { + Sd(rec, MemOperand(sp, 0)); + } + + bool IsNear(Label* L, Condition cond, int rs_reg); + + // Swap two registers. If the scratch register is omitted then a slightly + // less efficient form using xor instead of mov is emitted. + void Swap(Register reg1, Register reg2, Register scratch = no_reg); + + void PushRoot(RootIndex index) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + LoadRoot(scratch, index); + Push(scratch); + } + + // Compare the object in a register to a value and jump if they are equal. + void JumpIfRoot(Register with, RootIndex index, Label* if_equal) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + LoadRoot(scratch, index); + Branch(if_equal, eq, with, Operand(scratch)); + } + + // Compare the object in a register to a value and jump if they are not equal. + void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + LoadRoot(scratch, index); + Branch(if_not_equal, ne, with, Operand(scratch)); + } + + // Checks if value is in range [lower_limit, higher_limit] using a single + // comparison. + void JumpIfIsInRange(Register value, unsigned lower_limit, + unsigned higher_limit, Label* on_in_range); + + // --------------------------------------------------------------------------- + // GC Support + + // Notify the garbage collector that we wrote a pointer into an object. + // |object| is the object being stored into, |value| is the object being + // stored. value and scratch registers are clobbered by the operation. + // The offset is the offset from the start of the object, not the offset from + // the tagged HeapObject pointer. For use with FieldOperand(reg, off). + void RecordWriteField( + Register object, int offset, Register value, RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = RememberedSetAction::kEmit, + SmiCheck smi_check = SmiCheck::kInline); + + // For a given |object| notify the garbage collector that the slot |address| + // has been written. |value| is the object being stored. The value and + // address registers are clobbered by the operation. + void RecordWrite( + Register object, Operand offset, Register value, RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = RememberedSetAction::kEmit, + SmiCheck smi_check = SmiCheck::kInline); + + // void Pref(int32_t hint, const MemOperand& rs); + + // --------------------------------------------------------------------------- + // Pseudo-instructions. + + void LoadWordPair(Register rd, const MemOperand& rs); + void StoreWordPair(Register rd, const MemOperand& rs); + + void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft); + void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft); + void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft); + void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft); + + // Enter exit frame. + // argc - argument count to be dropped by LeaveExitFrame. + // save_doubles - saves FPU registers on stack. + // stack_space - extra stack space. + void EnterExitFrame(bool save_doubles, int stack_space = 0, + StackFrame::Type frame_type = StackFrame::EXIT); + + // Leave the current exit frame. 
+ void LeaveExitFrame(bool save_doubles, Register arg_count, + bool do_return = NO_EMIT_RETURN, + bool argument_count_is_length = false); + + // Make sure the stack is aligned. Only emits code in debug mode. + void AssertStackIsAligned(); + + // Load the global proxy from the current context. + void LoadGlobalProxy(Register dst) { + LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX); + } + + void LoadNativeContextSlot(Register dst, int index); + + // Load the initial map from the global function. The registers + // function and map can be the same, function is then overwritten. + void LoadGlobalFunctionInitialMap(Register function, Register map, + Register scratch); + + // ------------------------------------------------------------------------- + // JavaScript invokes. + + // Invoke the JavaScript function code by either calling or jumping. + void InvokeFunctionCode(Register function, Register new_target, + Register expected_parameter_count, + Register actual_parameter_count, InvokeType type); + + // On function call, call into the debugger if necessary. + void CheckDebugHook(Register fun, Register new_target, + Register expected_parameter_count, + Register actual_parameter_count); + + // Invoke the JavaScript function in the given register. Changes the + // current context to the context in the function before invoking. + void InvokeFunctionWithNewTarget(Register function, Register new_target, + Register actual_parameter_count, + InvokeType type); + void InvokeFunction(Register function, Register expected_parameter_count, + Register actual_parameter_count, InvokeType type); + + // Exception handling. + + // Push a new stack handler and link into stack handler chain. + void PushStackHandler(); + + // Unlink the stack handler on top of the stack from the stack handler chain. + // Must preserve the result register. + void PopStackHandler(); + + // ------------------------------------------------------------------------- + // Support functions. + + void GetObjectType(Register function, Register map, Register type_reg); + + void GetInstanceTypeRange(Register map, Register type_reg, + InstanceType lower_limit, Register range); + + // ------------------------------------------------------------------------- + // Runtime calls. + + // Call a runtime routine. + void CallRuntime(const Runtime::Function* f, int num_arguments, + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); + + // Convenience function: Same as above, but takes the fid instead. + void CallRuntime(Runtime::FunctionId fid, + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { + const Runtime::Function* function = Runtime::FunctionForId(fid); + CallRuntime(function, function->nargs, save_doubles); + } + + // Convenience function: Same as above, but takes the fid instead. + void CallRuntime(Runtime::FunctionId fid, int num_arguments, + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { + CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); + } + + // Convenience function: tail call a runtime routine (jump). + void TailCallRuntime(Runtime::FunctionId fid); + + // Jump to the builtin routine. + void JumpToExternalReference(const ExternalReference& builtin, + bool builtin_exit_frame = false); + + // Generates a trampoline to jump to the off-heap instruction stream. + void JumpToOffHeapInstructionStream(Address entry); + + // --------------------------------------------------------------------------- + // In-place weak references. 
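+  // If |in| holds a cleared weak reference, control branches to
+  // |target_if_cleared|; otherwise the weak tag is stripped and the strong
+  // pointer is left in |out|.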
+ void LoadWeakValue(Register out, Register in, Label* target_if_cleared); + + // ------------------------------------------------------------------------- + // StatsCounter support. + + void IncrementCounter(StatsCounter* counter, int value, Register scratch1, + Register scratch2) { + if (!FLAG_native_code_counters) return; + EmitIncrementCounter(counter, value, scratch1, scratch2); + } + void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1, + Register scratch2); + void DecrementCounter(StatsCounter* counter, int value, Register scratch1, + Register scratch2) { + if (!FLAG_native_code_counters) return; + EmitDecrementCounter(counter, value, scratch1, scratch2); + } + void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1, + Register scratch2); + + // ------------------------------------------------------------------------- + // Stack limit utilities + + enum StackLimitKind { kInterruptStackLimit, kRealStackLimit }; + void LoadStackLimit(Register destination, StackLimitKind kind); + void StackOverflowCheck(Register num_args, Register scratch1, + Register scratch2, Label* stack_overflow, + Label* done = nullptr); + + // Left-shifted from int32 equivalent of Smi. + void SmiScale(Register dst, Register src, int scale) { + if (SmiValuesAre32Bits()) { + // The int portion is upper 32-bits of 64-bit word. + srai(dst, src, (kSmiShift - scale) & 0x3F); + } else { + DCHECK(SmiValuesAre31Bits()); + DCHECK_GE(scale, kSmiTagSize); + slliw(dst, src, scale - kSmiTagSize); + } + } + + // Test if the register contains a smi. + inline void SmiTst(Register value, Register scratch) { + And(scratch, value, Operand(kSmiTagMask)); + } + + enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver }; + enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes }; + void DropArguments(Register count, ArgumentsCountType type, + ArgumentsCountMode mode, Register scratch = no_reg); + void DropArgumentsAndPushNewReceiver(Register argc, Register receiver, + ArgumentsCountType type, + ArgumentsCountMode mode, + Register scratch = no_reg); + + // Jump if the register contains a non-smi. + void JumpIfNotSmi(Register value, Label* not_smi_label); + + + // Abort execution if argument is not a Constructor, enabled via --debug-code. + void AssertConstructor(Register object); + + // Abort execution if argument is not a JSFunction, enabled via --debug-code. + void AssertFunction(Register object); + + // Abort execution if argument is not a callable JSFunction, enabled via + // --debug-code. + void AssertCallableFunction(Register object); + + // Abort execution if argument is not a JSBoundFunction, + // enabled via --debug-code. + void AssertBoundFunction(Register object); + + // Abort execution if argument is not a JSGeneratorObject (or subclass), + // enabled via --debug-code. + void AssertGeneratorObject(Register object); + + // Abort execution if argument is not undefined or an AllocationSite, enabled + // via --debug-code. + void AssertUndefinedOrAllocationSite(Register object, Register scratch); + + template + void DecodeField(Register dst, Register src) { + ExtractBits(dst, src, Field::kShift, Field::kSize); + } + + template + void DecodeField(Register reg) { + DecodeField(reg, reg); + } + + private: + // Helper functions for generating invokes. + void InvokePrologue(Register expected_parameter_count, + Register actual_parameter_count, Label* done, + InvokeType type); + + // Compute memory operands for safepoint stack slots. 
+ static int SafepointRegisterStackIndex(int reg_code); + + // Needs access to SafepointRegisterStackIndex for compiled frame + // traversal. + friend class CommonFrame; + + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); +}; + +template +void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, + Func GetLabelFunction) { + // Ensure that dd-ed labels following this instruction use 8 bytes aligned + // addresses. + BlockTrampolinePoolFor(static_cast(case_count) * 2 + + kSwitchTablePrologueSize); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Register scratch2 = temps.Acquire(); + + Align(8); + // Load the address from the jump table at index and jump to it + auipc(scratch, 0); // Load the current PC into scratch + slli(scratch2, index, + kSystemPointerSizeLog2); // scratch2 = offset of indexth entry + add(scratch2, scratch2, + scratch); // scratch2 = (saved PC) + (offset of indexth entry) + ld(scratch2, scratch2, + 6 * kInstrSize); // Add the size of these 6 instructions to the + // offset, then load + jr(scratch2); // Jump to the address loaded from the table + nop(); // For 16-byte alignment + for (size_t index = 0; index < case_count; ++index) { + dd(GetLabelFunction(index)); + } +} + +#define ACCESS_MASM(masm) masm-> + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_ diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h new file mode 100644 index 00000000000000..fa5ffe40432af5 --- /dev/null +++ b/deps/v8/src/codegen/riscv64/register-riscv64.h @@ -0,0 +1,314 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_ +#define V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_ + +#include "src/codegen/register-base.h" +#include "src/codegen/riscv64/constants-riscv64.h" + +namespace v8 { +namespace internal { + +// clang-format off + +#define GENERAL_REGISTERS(V) \ + V(zero_reg) V(ra) V(sp) V(gp) V(tp) V(t0) V(t1) V(t2) \ + V(fp) V(s1) V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) \ + V(a6) V(a7) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) V(s9) \ + V(s10) V(s11) V(t3) V(t4) V(t5) V(t6) + +// s3: scratch register s4: scratch register 2 used in code-generator-riscv64 +// s6: roots in Javascript code s7: context register +// s11: PtrComprCageBaseRegister +// t3 t5 : scratch register used in scratch_register_list +// t6 : call reg. 
+// t0 t1 t2 t4:caller saved scratch register can be used in macroassembler and +// builtin-riscv64 +#define ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \ + V(a0) V(a1) V(a2) V(a3) \ + V(a4) V(a5) V(a6) V(a7) V(t0) \ + V(t1) V(t2) V(t4) V(s7) V(s8) V(s9) V(s10) + +#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE +#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V) +#else +#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V) V(s11) +#endif + +#define ALLOCATABLE_GENERAL_REGISTERS(V) \ + ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \ + MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V) + +#define DOUBLE_REGISTERS(V) \ + V(ft0) V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) \ + V(fs0) V(fs1) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \ + V(fa6) V(fa7) V(fs2) V(fs3) V(fs4) V(fs5) V(fs6) V(fs7) \ + V(fs8) V(fs9) V(fs10) V(fs11) V(ft8) V(ft9) V(ft10) V(ft11) + +#define FLOAT_REGISTERS DOUBLE_REGISTERS +#define VECTOR_REGISTERS(V) \ + V(v0) V(v1) V(v2) V(v3) V(v4) V(v5) V(v6) V(v7) \ + V(v8) V(v9) V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) \ + V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \ + V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31) + +#define ALLOCATABLE_SIMD128_REGISTERS(V) \ + V(v1) V(v2) V(v3) V(v4) V(v5) V(v6) V(v7) \ + V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) V(v16) \ + V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v26) \ + V(v27) V(v28) V(v29) V(v30) V(v31) + +#define ALLOCATABLE_DOUBLE_REGISTERS(V) \ + V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \ + V(ft9) V(ft10) V(ft11) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \ + V(fa6) V(fa7) + +// Returns the number of padding slots needed for stack pointer alignment. +constexpr int ArgumentPaddingSlots(int argument_count) { + // No argument padding required. + return 0; +} + +// clang-format on + +// Note that the bit values must match those used in actual instruction +// encoding. +const int kNumRegs = 32; + +const int kUndefIndex = -1; +// Map with indexes on stack that corresponds to codes of saved registers. +const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg + kUndefIndex, // ra + kUndefIndex, // sp + kUndefIndex, // gp + kUndefIndex, // tp + 0, // t0 + 1, // t1 + 2, // t2 + 3, // s0/fp + 4, // s1 + 5, // a0 + 6, // a1 + 7, // a2 + 8, // a3 + 9, // a4 + 10, // a5 + 11, // a6 + 12, // a7 + 13, // s2 + 14, // s3 + 15, // s4 + 16, // s5 + 17, // s6 + 18, // s7 + 19, // s8 + 10, // s9 + 21, // s10 + 22, // s11 + kUndefIndex, // t3 + 23, // t4 + kUndefIndex, // t5 + kUndefIndex}; // t6 +// CPU Registers. +// +// 1) We would prefer to use an enum, but enum values are assignment- +// compatible with int, which has caused code-generation bugs. +// +// 2) We would prefer to use a class instead of a struct but we don't like +// the register initialization to depend on the particular initialization +// order (which appears to be different on OS X, Linux, and Windows for the +// installed versions of C++ we tried). Using a struct permits C-style +// "initialization". Also, the Register objects cannot be const as this +// forces initialization stubs in MSVC, making us dependent on initialization +// order. +// +// 3) By not using an enum, we are possibly preventing the compiler from +// doing certain constant folds, which may significantly reduce the +// code generated for some assembly instructions (because they boil down +// to a few constants). If this is a problem, we could change the code +// such that we use an enum in optimized mode, and the struct in debug +// mode. 
This way we get the compile-time error checking in debug mode +// and best performance in optimized code. + +// ----------------------------------------------------------------------------- +// Implementation of Register and FPURegister. + +enum RegisterCode { +#define REGISTER_CODE(R) kRegCode_##R, + GENERAL_REGISTERS(REGISTER_CODE) +#undef REGISTER_CODE + kRegAfterLast +}; + +class Register : public RegisterBase { + public: +#if defined(V8_TARGET_LITTLE_ENDIAN) + static constexpr int kMantissaOffset = 0; + static constexpr int kExponentOffset = 4; +#elif defined(V8_TARGET_BIG_ENDIAN) + static constexpr int kMantissaOffset = 4; + static constexpr int kExponentOffset = 0; +#else +#error Unknown endianness +#endif + + private: + friend class RegisterBase; + explicit constexpr Register(int code) : RegisterBase(code) {} +}; + +// s7: context register +// s3: scratch register +// s4: scratch register 2 +#define DECLARE_REGISTER(R) \ + constexpr Register R = Register::from_code(kRegCode_##R); +GENERAL_REGISTERS(DECLARE_REGISTER) +#undef DECLARE_REGISTER + +constexpr Register no_reg = Register::no_reg(); + +int ToNumber(Register reg); + +Register ToRegister(int num); + +constexpr bool kPadArguments = false; +constexpr AliasingKind kFPAliasing = AliasingKind::kIndependent; +constexpr bool kSimdMaskRegisters = false; + +enum DoubleRegisterCode { +#define REGISTER_CODE(R) kDoubleCode_##R, + DOUBLE_REGISTERS(REGISTER_CODE) +#undef REGISTER_CODE + kDoubleAfterLast +}; + +enum VRegisterCode { +#define REGISTER_CODE(R) kVRCode_##R, + VECTOR_REGISTERS(REGISTER_CODE) +#undef REGISTER_CODE + kVRAfterLast +}; +class VRegister : public RegisterBase { + friend class RegisterBase; + + public: + explicit constexpr VRegister(int code) : RegisterBase(code) {} +}; + +// Coprocessor register. +class FPURegister : public RegisterBase { + public: + // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers + // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to + // number of Double regs (64-bit regs, or FPU-reg-pairs). + + FPURegister low() const { + // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1. + // Find low reg of a Double-reg pair, which is the reg itself. + return FPURegister::from_code(code()); + } + FPURegister high() const { + // TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1. + // Find high reg of a Doubel-reg pair, which is reg + 1. + return FPURegister::from_code(code() + 1); + } + + // FIXME(riscv64): In Rvv, Vector regs is different from Float Regs. But in + // this cl, in order to facilitate modification, it is assumed that the vector + // register and floating point register are shared. + VRegister toV() const { + DCHECK(base::IsInRange(static_cast(code()), 0, kVRAfterLast - 1)); + return VRegister(code()); + } + + private: + friend class RegisterBase; + explicit constexpr FPURegister(int code) : RegisterBase(code) {} +}; + + +// A few double registers are reserved: one as a scratch register and one to +// hold 0.0. +// fs9: 0.0 +// fs11: scratch register. + +// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers. 
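+// On RISC-V the F and D extensions likewise share the 32 f-registers (singles
+// are NaN-boxed inside the 64-bit registers), so both aliases below name the
+// same FPURegister type.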
+using FloatRegister = FPURegister; + +using DoubleRegister = FPURegister; + +using Simd128Register = VRegister; + +#define DECLARE_DOUBLE_REGISTER(R) \ + constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R); +DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER) +#undef DECLARE_DOUBLE_REGISTER + +constexpr DoubleRegister no_dreg = DoubleRegister::no_reg(); + +#define DECLARE_VECTOR_REGISTER(R) \ + constexpr VRegister R = VRegister::from_code(kVRCode_##R); +VECTOR_REGISTERS(DECLARE_VECTOR_REGISTER) +#undef DECLARE_VECTOR_REGISTER + +const VRegister no_msareg = VRegister::no_reg(); + +// Register aliases. +// cp is assumed to be a callee saved register. +constexpr Register kRootRegister = s6; +constexpr Register cp = s7; +constexpr Register kScratchReg = s3; +constexpr Register kScratchReg2 = s4; + +constexpr DoubleRegister kScratchDoubleReg = ft0; + +constexpr DoubleRegister kDoubleRegZero = fs9; + +// Define {RegisterName} methods for the register types. +DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS) +DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS) +DEFINE_REGISTER_NAMES(VRegister, VECTOR_REGISTERS) + +// Give alias names to registers for calling conventions. +constexpr Register kReturnRegister0 = a0; +constexpr Register kReturnRegister1 = a1; +constexpr Register kReturnRegister2 = a2; +constexpr Register kJSFunctionRegister = a1; +constexpr Register kContextRegister = s7; +constexpr Register kAllocateSizeRegister = a1; +constexpr Register kInterpreterAccumulatorRegister = a0; +constexpr Register kInterpreterBytecodeOffsetRegister = t0; +constexpr Register kInterpreterBytecodeArrayRegister = t1; +constexpr Register kInterpreterDispatchTableRegister = t2; + +constexpr Register kJavaScriptCallArgCountRegister = a0; +constexpr Register kJavaScriptCallCodeStartRegister = a2; +constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister; +constexpr Register kJavaScriptCallNewTargetRegister = a3; +constexpr Register kJavaScriptCallExtraArg1Register = a2; + +constexpr Register kOffHeapTrampolineRegister = t6; +constexpr Register kRuntimeCallFunctionRegister = a1; +constexpr Register kRuntimeCallArgCountRegister = a0; +constexpr Register kRuntimeCallArgvRegister = a2; +constexpr Register kWasmInstanceRegister = a0; +constexpr Register kWasmCompileLazyFuncIndexRegister = t0; + +constexpr DoubleRegister kFPReturnRegister0 = fa0; +constexpr VRegister kSimd128ScratchReg = v24; +constexpr VRegister kSimd128ScratchReg2 = v23; +constexpr VRegister kSimd128ScratchReg3 = v8; +constexpr VRegister kSimd128RegZero = v25; + +#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE +constexpr Register kPtrComprCageBaseRegister = s11; // callee save +#else +constexpr Register kPtrComprCageBaseRegister = kRootRegister; +#endif + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_ diff --git a/deps/v8/src/codegen/riscv64/reglist-riscv64.h b/deps/v8/src/codegen/riscv64/reglist-riscv64.h new file mode 100644 index 00000000000000..363dd46181f1bd --- /dev/null +++ b/deps/v8/src/codegen/riscv64/reglist-riscv64.h @@ -0,0 +1,64 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
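The calling-convention aliases that close out register-riscv64.h above (kReturnRegister0, kContextRegister, and friends) are plain constexpr copies of physical registers: platform-independent code compares and passes them by value, and each architecture header simply pins them to its own ABI registers. A small sketch of that idea with a hypothetical Reg value type (the register codes are arbitrary stand-ins, not the real RISC-V encodings):

// Hypothetical value-type register, in the spirit of v8::internal::Register.
struct Reg {
  int code;
};
constexpr bool operator==(Reg lhs, Reg rhs) { return lhs.code == rhs.code; }

// "Physical" registers of an imaginary architecture.
constexpr Reg a0{10};
constexpr Reg s7{23};

// ABI-neutral aliases, as in kReturnRegister0 = a0 and kContextRegister = s7.
constexpr Reg kDemoReturnRegister0 = a0;
constexpr Reg kDemoContextRegister = s7;

// Code written against the alias still refers to the same physical register.
static_assert(kDemoReturnRegister0 == a0, "alias is the physical register");

int main() { return kDemoContextRegister == s7 ? 0 : 1; }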
+
+#ifndef V8_CODEGEN_RISCV64_REGLIST_RISCV64_H_
+#define V8_CODEGEN_RISCV64_REGLIST_RISCV64_H_
+
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist-base.h"
+#include "src/codegen/riscv64/constants-riscv64.h"
+
+namespace v8 {
+namespace internal {
+
+using RegList = RegListBase<Register>;
+using DoubleRegList = RegListBase<DoubleRegister>;
+ASSERT_TRIVIALLY_COPYABLE(RegList);
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
+
+const RegList kJSCallerSaved = {t0, t1, t2, a0, a1, a2, a3, a4, a5, a6, a7, t4};
+
+const int kNumJSCallerSaved = 12;
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved = {fp,    // fp/s0
+                              s1,    // s1
+                              s2,    // s2
+                              s3,    // s3 scratch register
+                              s4,    // s4 scratch register 2
+                              s5,    // s5
+                              s6,    // s6 (roots in JavaScript code)
+                              s7,    // s7 (cp in JavaScript code)
+                              s8,    // s8
+                              s9,    // s9
+                              s10,   // s10
+                              s11};  // s11
+
+const int kNumCalleeSaved = 12;
+
+const DoubleRegList kCalleeSavedFPU = {fs0, fs1, fs2, fs3, fs4,  fs5,
+                                       fs6, fs7, fs8, fs9, fs10, fs11};
+
+const int kNumCalleeSavedFPU = kCalleeSavedFPU.Count();
+
+const DoubleRegList kCallerSavedFPU = {ft0, ft1, ft2, ft3, ft4,  ft5,  ft6,
+                                       ft7, fa0, fa1, fa2, fa3,  fa4,  fa5,
+                                       fa6, fa7, ft8, ft9, ft10, ft11};
+
+const int kNumCallerSavedFPU = kCallerSavedFPU.Count();
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+const int kNumSafepointRegisters = 32;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CODEGEN_RISCV64_REGLIST_RISCV64_H_
diff --git a/deps/v8/src/common/allow-deprecated.h b/deps/v8/src/common/allow-deprecated.h
new file mode 100644
index 00000000000000..8a512366c3091f
--- /dev/null
+++ b/deps/v8/src/common/allow-deprecated.h
@@ -0,0 +1,37 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
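reglist-riscv64.h above builds kSafepointSavedRegisters by OR-ing kJSCallerSaved and kCalleeSaved together and derives kNumCalleeSavedFPU with Count(); RegListBase supports this by representing a register list as a bit set, one bit per register code. A minimal sketch of that representation with a hypothetical DemoRegList (register codes chosen arbitrarily), not the real RegListBase template:

#include <cstdint>
#include <initializer_list>

// One bit per register code; union is bitwise OR, Count() is a popcount.
struct DemoRegList {
  uint32_t bits = 0;

  constexpr DemoRegList() = default;
  constexpr DemoRegList(std::initializer_list<int> codes) {
    for (int code : codes) bits |= uint32_t{1} << code;
  }

  constexpr DemoRegList operator|(DemoRegList other) const {
    DemoRegList result;
    result.bits = bits | other.bits;
    return result;
  }

  constexpr int Count() const {
    int n = 0;
    for (uint32_t b = bits; b != 0; b &= b - 1) ++n;  // clear lowest set bit
    return n;
  }
};

// In the spirit of kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved.
constexpr DemoRegList kDemoCallerSaved{5, 6, 7};
constexpr DemoRegList kDemoCalleeSaved{8, 9, 18, 19};
constexpr DemoRegList kDemoSafepointSaved = kDemoCallerSaved | kDemoCalleeSaved;
static_assert(kDemoSafepointSaved.Count() == 7, "disjoint lists add up");

int main() { return kDemoSafepointSaved.Count(); }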
+ +#ifndef V8_COMMON_ALLOW_DEPRECATED_H_ +#define V8_COMMON_ALLOW_DEPRECATED_H_ + +#if defined(V8_IMMINENT_DEPRECATION_WARNINGS) || \ + defined(V8_DEPRECATION_WARNINGS) + +#if defined(V8_CC_MSVC) + +#define START_ALLOW_USE_DEPRECATED() \ + __pragma(warning(push)) __pragma(warning(disable : 4996)) + +#define END_ALLOW_USE_DEPRECATED() __pragma(warning(pop)) + +#else // !defined(V8_CC_MSVC) + +#define START_ALLOW_USE_DEPRECATED() \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") + +#define END_ALLOW_USE_DEPRECATED() _Pragma("GCC diagnostic pop") + +#endif // !defined(V8_CC_MSVC) + +#else // !(defined(V8_IMMINENT_DEPRECATION_WARNINGS) || + // defined(V8_DEPRECATION_WARNINGS)) + +#define START_ALLOW_USE_DEPRECATED() +#define END_ALLOW_USE_DEPRECATED() + +#endif // !(defined(V8_IMMINENT_DEPRECATION_WARNINGS) || + // defined(V8_DEPRECATION_WARNINGS)) + +#endif // V8_COMMON_ALLOW_DEPRECATED_H_ diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc new file mode 100644 index 00000000000000..4dd0d5cd324d7f --- /dev/null +++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc @@ -0,0 +1,4455 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/codegen/assembler-inl.h" +#include "src/codegen/callable.h" +#include "src/codegen/macro-assembler.h" +#include "src/codegen/optimized-compilation-info.h" +#include "src/compiler/backend/code-generator-impl.h" +#include "src/compiler/backend/code-generator.h" +#include "src/compiler/backend/gap-resolver.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/osr.h" +#include "src/heap/memory-chunk.h" + +#if V8_ENABLE_WEBASSEMBLY +#include "src/wasm/wasm-code-manager.h" +#endif // V8_ENABLE_WEBASSEMBLY + +namespace v8 { +namespace internal { +namespace compiler { + +#define __ tasm()-> + +// TODO(plind): consider renaming these macros. +#define TRACE_MSG(msg) \ + PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ + __LINE__) + +#define TRACE_UNIMPL() \ + PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \ + __LINE__) + +// Adds Mips-specific methods to convert InstructionOperands. +class MipsOperandConverter final : public InstructionOperandConverter { + public: + MipsOperandConverter(CodeGenerator* gen, Instruction* instr) + : InstructionOperandConverter(gen, instr) {} + + FloatRegister OutputSingleRegister(size_t index = 0) { + return ToSingleRegister(instr_->OutputAt(index)); + } + + FloatRegister InputSingleRegister(size_t index) { + return ToSingleRegister(instr_->InputAt(index)); + } + + FloatRegister ToSingleRegister(InstructionOperand* op) { + // Single (Float) and Double register namespace is same on MIPS, + // both are typedefs of FPURegister. 
+ return ToDoubleRegister(op); + } + + Register InputOrZeroRegister(size_t index) { + if (instr_->InputAt(index)->IsImmediate()) { + DCHECK_EQ(0, InputInt32(index)); + return zero_reg; + } + return InputRegister(index); + } + + DoubleRegister InputOrZeroDoubleRegister(size_t index) { + if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; + + return InputDoubleRegister(index); + } + + DoubleRegister InputOrZeroSingleRegister(size_t index) { + if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; + + return InputSingleRegister(index); + } + + Operand InputImmediate(size_t index) { + Constant constant = ToConstant(instr_->InputAt(index)); + switch (constant.type()) { + case Constant::kInt32: + return Operand(constant.ToInt32()); + case Constant::kFloat32: + return Operand::EmbeddedNumber(constant.ToFloat32()); + case Constant::kFloat64: + return Operand::EmbeddedNumber(constant.ToFloat64().value()); + case Constant::kInt64: + case Constant::kExternalReference: + case Constant::kCompressedHeapObject: + case Constant::kHeapObject: + // TODO(plind): Maybe we should handle ExtRef & HeapObj here? + // maybe not done on arm due to const pool ?? + break; + case Constant::kDelayedStringConstant: + return Operand::EmbeddedStringConstant( + constant.ToDelayedStringConstant()); + case Constant::kRpoNumber: + UNREACHABLE(); // TODO(titzer): RPO immediates on mips? + } + UNREACHABLE(); + } + + Operand InputOperand(size_t index) { + InstructionOperand* op = instr_->InputAt(index); + if (op->IsRegister()) { + return Operand(ToRegister(op)); + } + return InputImmediate(index); + } + + MemOperand MemoryOperand(size_t* first_index) { + const size_t index = *first_index; + switch (AddressingModeField::decode(instr_->opcode())) { + case kMode_None: + break; + case kMode_MRI: + *first_index += 2; + return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); + case kMode_MRR: + // TODO(plind): r6 address mode, to be implemented ... + UNREACHABLE(); + } + UNREACHABLE(); + } + + MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); } + + MemOperand ToMemOperand(InstructionOperand* op) const { + DCHECK_NOT_NULL(op); + DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); + return SlotToMemOperand(AllocatedOperand::cast(op)->index()); + } + + MemOperand SlotToMemOperand(int slot) const { + FrameOffset offset = frame_access_state()->GetFrameOffset(slot); + return MemOperand(offset.from_stack_pointer() ? 
sp : fp, offset.offset()); + } +}; + +static inline bool HasRegisterInput(Instruction* instr, size_t index) { + return instr->InputAt(index)->IsRegister(); +} + +namespace { + +class OutOfLineRecordWrite final : public OutOfLineCode { + public: + OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index, + Register value, Register scratch0, Register scratch1, + RecordWriteMode mode, StubCallMode stub_mode) + : OutOfLineCode(gen), + object_(object), + index_(index), + value_(value), + scratch0_(scratch0), + scratch1_(scratch1), + mode_(mode), +#if V8_ENABLE_WEBASSEMBLY + stub_mode_(stub_mode), +#endif // V8_ENABLE_WEBASSEMBLY + must_save_lr_(!gen->frame_access_state()->has_frame()), + zone_(gen->zone()) { + DCHECK(!AreAliased(object, index, scratch0, scratch1)); + DCHECK(!AreAliased(value, index, scratch0, scratch1)); + } + + void Generate() final { + __ CheckPageFlag(value_, scratch0_, + MemoryChunk::kPointersToHereAreInterestingMask, eq, + exit()); + __ Addu(scratch1_, object_, index_); + RememberedSetAction const remembered_set_action = + mode_ > RecordWriteMode::kValueIsMap || + FLAG_use_full_record_write_builtin + ? RememberedSetAction::kEmit + : RememberedSetAction::kOmit; + SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters() + ? SaveFPRegsMode::kSave + : SaveFPRegsMode::kIgnore; + if (must_save_lr_) { + // We need to save and restore ra if the frame was elided. + __ Push(ra); + } + + if (mode_ == RecordWriteMode::kValueIsEphemeronKey) { + __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode); +#if V8_ENABLE_WEBASSEMBLY + } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) { + // A direct call to a wasm runtime stub defined in this module. + // Just encode the stub index. This will be patched when the code + // is added to the native module and copied into wasm code space. 
+ __ CallRecordWriteStubSaveRegisters(object_, scratch1_, + remembered_set_action, save_fp_mode, + StubCallMode::kCallWasmRuntimeStub); +#endif // V8_ENABLE_WEBASSEMBLY + } else { + __ CallRecordWriteStubSaveRegisters(object_, scratch1_, + remembered_set_action, save_fp_mode); + } + if (must_save_lr_) { + __ Pop(ra); + } + } + + private: + Register const object_; + Register const index_; + Register const value_; + Register const scratch0_; + Register const scratch1_; + RecordWriteMode const mode_; +#if V8_ENABLE_WEBASSEMBLY + StubCallMode const stub_mode_; +#endif // V8_ENABLE_WEBASSEMBLY + bool must_save_lr_; + Zone* zone_; +}; + +#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \ + class ool_name final : public OutOfLineCode { \ + public: \ + ool_name(CodeGenerator* gen, T dst, T src1, T src2) \ + : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \ + \ + void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \ + \ + private: \ + T const dst_; \ + T const src1_; \ + T const src2_; \ + } + +CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister); +CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister); +CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, DoubleRegister); +CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, DoubleRegister); + +#undef CREATE_OOL_CLASS + +Condition FlagsConditionToConditionCmp(FlagsCondition condition) { + switch (condition) { + case kEqual: + return eq; + case kNotEqual: + return ne; + case kSignedLessThan: + return lt; + case kSignedGreaterThanOrEqual: + return ge; + case kSignedLessThanOrEqual: + return le; + case kSignedGreaterThan: + return gt; + case kUnsignedLessThan: + return lo; + case kUnsignedGreaterThanOrEqual: + return hs; + case kUnsignedLessThanOrEqual: + return ls; + case kUnsignedGreaterThan: + return hi; + case kUnorderedEqual: + case kUnorderedNotEqual: + break; + default: + break; + } + UNREACHABLE(); +} + +Condition FlagsConditionToConditionTst(FlagsCondition condition) { + switch (condition) { + case kNotEqual: + return ne; + case kEqual: + return eq; + default: + break; + } + UNREACHABLE(); +} + +FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, + FlagsCondition condition) { + switch (condition) { + case kEqual: + *predicate = true; + return EQ; + case kNotEqual: + *predicate = false; + return EQ; + case kUnsignedLessThan: + *predicate = true; + return OLT; + case kUnsignedGreaterThanOrEqual: + *predicate = false; + return OLT; + case kUnsignedLessThanOrEqual: + *predicate = true; + return OLE; + case kUnsignedGreaterThan: + *predicate = false; + return OLE; + case kUnorderedEqual: + case kUnorderedNotEqual: + *predicate = true; + break; + default: + *predicate = true; + break; + } + UNREACHABLE(); +} + +#define UNSUPPORTED_COND(opcode, condition) \ + StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \ + << "\""; \ + UNIMPLEMENTED(); + +} // namespace + +#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ + do { \ + __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ + do { \ + __ sync(); \ + __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_BINOP(bin_instr) \ + do { \ + Label binop; \ + __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ sync(); \ + __ bind(&binop); \ + __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ + __ 
bin_instr(i.TempRegister(1), i.OutputRegister(0), \ + Operand(i.InputRegister(2))); \ + __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr, external) \ + do { \ + if (IsMipsArchVariant(kMips32r6)) { \ + Label binop; \ + Register oldval_low = \ + instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \ + Register oldval_high = \ + instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); \ + __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ sync(); \ + __ bind(&binop); \ + __ llx(oldval_high, MemOperand(i.TempRegister(0), 4)); \ + __ ll(oldval_low, MemOperand(i.TempRegister(0), 0)); \ + __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low, \ + oldval_high, i.InputRegister(2), i.InputRegister(3)); \ + __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4)); \ + __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ + __ sync(); \ + } else { \ + FrameScope scope(tasm(), StackFrame::MANUAL); \ + __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); \ + __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \ + __ PrepareCallCFunction(3, 0, kScratchReg); \ + __ CallCFunction(ExternalReference::external(), 3, 0); \ + __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \ + } \ + } while (0) + +#define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr, external) \ + do { \ + if (IsMipsArchVariant(kMips32r6)) { \ + Label binop; \ + Register oldval_low = \ + instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \ + Register oldval_high = \ + instr->OutputCount() >= 2 ? 
i.OutputRegister(1) : i.TempRegister(2); \ + __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ sync(); \ + __ bind(&binop); \ + __ llx(oldval_high, MemOperand(i.TempRegister(0), 4)); \ + __ ll(oldval_low, MemOperand(i.TempRegister(0), 0)); \ + __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low, \ + oldval_high, i.InputRegister(2), i.InputRegister(3), \ + kScratchReg, kScratchReg2); \ + __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4)); \ + __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ + __ sync(); \ + } else { \ + FrameScope scope(tasm(), StackFrame::MANUAL); \ + __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); \ + __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \ + __ PrepareCallCFunction(3, 0, kScratchReg); \ + __ CallCFunction(ExternalReference::external(), 3, 0); \ + __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \ + } \ + } while (0) + +#define ASSEMBLE_ATOMIC_BINOP_EXT(sign_extend, size, bin_instr) \ + do { \ + Label binop; \ + __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \ + __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(3))); \ + __ sll(i.TempRegister(3), i.TempRegister(3), 3); \ + __ sync(); \ + __ bind(&binop); \ + __ Ll(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ + __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \ + size, sign_extend); \ + __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \ + Operand(i.InputRegister(2))); \ + __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \ + size); \ + __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER() \ + do { \ + Label exchange; \ + __ sync(); \ + __ bind(&exchange); \ + __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ + __ mov(i.TempRegister(1), i.InputRegister(2)); \ + __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(sign_extend, size) \ + do { \ + Label exchange; \ + __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ + __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \ + __ sll(i.TempRegister(1), i.TempRegister(1), 3); \ + __ sync(); \ + __ bind(&exchange); \ + __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ + __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ + size, sign_extend); \ + __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \ + size); \ + __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER() \ + do { \ + Label compareExchange; \ + Label exit; \ + __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ sync(); \ + __ bind(&compareExchange); \ + __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&exit, ne, i.InputRegister(2), \ + 
Operand(i.OutputRegister(0))); \ + __ mov(i.TempRegister(2), i.InputRegister(3)); \ + __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ + Operand(zero_reg)); \ + __ bind(&exit); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(sign_extend, size) \ + do { \ + Label compareExchange; \ + Label exit; \ + __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ + __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \ + __ sll(i.TempRegister(1), i.TempRegister(1), 3); \ + __ sync(); \ + __ bind(&compareExchange); \ + __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ + __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ + size, sign_extend); \ + __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \ + sign_extend); \ + __ BranchShort(&exit, ne, i.InputRegister(2), \ + Operand(i.OutputRegister(0))); \ + __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \ + size); \ + __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ + Operand(zero_reg)); \ + __ bind(&exit); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_IEEE754_BINOP(name) \ + do { \ + FrameScope scope(tasm(), StackFrame::MANUAL); \ + __ PrepareCallCFunction(0, 2, kScratchReg); \ + __ MovToFloatParameters(i.InputDoubleRegister(0), \ + i.InputDoubleRegister(1)); \ + __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ + /* Move the result in the double result register. */ \ + __ MovFromFloatResult(i.OutputDoubleRegister()); \ + } while (0) + +#define ASSEMBLE_IEEE754_UNOP(name) \ + do { \ + FrameScope scope(tasm(), StackFrame::MANUAL); \ + __ PrepareCallCFunction(0, 1, kScratchReg); \ + __ MovToFloatParameter(i.InputDoubleRegister(0)); \ + __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ + /* Move the result in the double result register. 
*/ \ + __ MovFromFloatResult(i.OutputDoubleRegister()); \ + } while (0) + +#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \ + do { \ + __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \ + i.InputSimd128Register(1)); \ + } while (0) + +#define ASSEMBLE_SIMD_EXTENDED_MULTIPLY(op0, op1) \ + do { \ + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); \ + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); \ + __ op0(kSimd128ScratchReg, kSimd128RegZero, i.InputSimd128Register(0)); \ + __ op0(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(1)); \ + __ op1(i.OutputSimd128Register(), kSimd128ScratchReg, kSimd128RegZero); \ + } while (0) + +void CodeGenerator::AssembleDeconstructFrame() { + __ mov(sp, fp); + __ Pop(ra, fp); +} + +void CodeGenerator::AssemblePrepareTailCall() { + if (frame_access_state()->has_frame()) { + __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); + __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + } + frame_access_state()->SetFrameAccessToSP(); +} +namespace { + +void AdjustStackPointerForTailCall(TurboAssembler* tasm, + FrameAccessState* state, + int new_slot_above_sp, + bool allow_shrinkage = true) { + int current_sp_offset = state->GetSPToFPSlotCount() + + StandardFrameConstants::kFixedSlotCountAboveFp; + int stack_slot_delta = new_slot_above_sp - current_sp_offset; + if (stack_slot_delta > 0) { + tasm->Subu(sp, sp, stack_slot_delta * kSystemPointerSize); + state->IncreaseSPDelta(stack_slot_delta); + } else if (allow_shrinkage && stack_slot_delta < 0) { + tasm->Addu(sp, sp, -stack_slot_delta * kSystemPointerSize); + state->IncreaseSPDelta(stack_slot_delta); + } +} + +} // namespace + +void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, + int first_unused_slot_offset) { + AdjustStackPointerForTailCall(tasm(), frame_access_state(), + first_unused_slot_offset, false); +} + +void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, + int first_unused_slot_offset) { + AdjustStackPointerForTailCall(tasm(), frame_access_state(), + first_unused_slot_offset); +} + +// Check that {kJavaScriptCallCodeStartRegister} is correct. +void CodeGenerator::AssembleCodeStartRegisterCheck() { + __ ComputeCodeStartAddress(kScratchReg); + __ Assert(eq, AbortReason::kWrongFunctionCodeStart, + kJavaScriptCallCodeStartRegister, Operand(kScratchReg)); +} + +// Check if the code object is marked for deoptimization. If it is, then it +// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need +// to: +// 1. read from memory the word that contains that bit, which can be found in +// the flags in the referenced {CodeDataContainer} object; +// 2. test kMarkedForDeoptimizationBit in those flags; and +// 3. if it is not zero then it jumps to the builtin. +void CodeGenerator::BailoutIfDeoptimized() { + int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; + __ lw(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset)); + __ lw(kScratchReg, + FieldMemOperand(kScratchReg, + CodeDataContainer::kKindSpecificFlagsOffset)); + __ And(kScratchReg, kScratchReg, + Operand(1 << Code::kMarkedForDeoptimizationBit)); + __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), + RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg)); +} + +// Assembles an instruction after register allocation, producing machine code. 
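The three numbered steps in the comment above BailoutIfDeoptimized() boil down to a load, a single-bit test, and a conditional jump. Written out in plain C++ rather than MIPS assembly, with an illustrative bit position and flag word standing in for the real CodeDataContainer layout:

#include <cstdint>

// Illustrative stand-in for the flag bit tested by BailoutIfDeoptimized().
constexpr uint32_t kDemoMarkedForDeoptimizationBit = 4;

// Step 1: load the word holding the kind-specific flags.
// Step 2: AND it with (1 << kDemoMarkedForDeoptimizationBit).
// Step 3: if the result is non-zero, take the lazy-deoptimization path.
bool ShouldBailOut(const uint32_t* kind_specific_flags) {
  uint32_t flags = *kind_specific_flags;
  return (flags & (uint32_t{1} << kDemoMarkedForDeoptimizationBit)) != 0;
}

int main() {
  uint32_t flags = uint32_t{1} << kDemoMarkedForDeoptimizationBit;
  return ShouldBailOut(&flags) ? 0 : 1;
}

In the generated code, the branch taken in step 3 targets the CompileLazyDeoptimizedCode builtin rather than returning to C++.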
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( + Instruction* instr) { + MipsOperandConverter i(this, instr); + InstructionCode opcode = instr->opcode(); + ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode); + switch (arch_opcode) { + case kArchCallCodeObject: { + if (instr->InputAt(0)->IsImmediate()) { + __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); + } else { + Register reg = i.InputRegister(0); + DCHECK_IMPLIES( + instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), + reg == kJavaScriptCallCodeStartRegister); + __ Call(reg, reg, Code::kHeaderSize - kHeapObjectTag); + } + RecordCallPosition(instr); + frame_access_state()->ClearSPDelta(); + break; + } + case kArchCallBuiltinPointer: { + DCHECK(!instr->InputAt(0)->IsImmediate()); + Register builtin_index = i.InputRegister(0); + __ CallBuiltinByIndex(builtin_index); + RecordCallPosition(instr); + frame_access_state()->ClearSPDelta(); + break; + } +#if V8_ENABLE_WEBASSEMBLY + case kArchCallWasmFunction: { + if (instr->InputAt(0)->IsImmediate()) { + Constant constant = i.ToConstant(instr->InputAt(0)); + Address wasm_code = static_cast
(constant.ToInt32()); + __ Call(wasm_code, constant.rmode()); + } else { + __ Call(i.InputRegister(0)); + } + RecordCallPosition(instr); + frame_access_state()->ClearSPDelta(); + break; + } + case kArchTailCallWasm: { + if (instr->InputAt(0)->IsImmediate()) { + Constant constant = i.ToConstant(instr->InputAt(0)); + Address wasm_code = static_cast
(constant.ToInt32()); + __ Jump(wasm_code, constant.rmode()); + } else { + __ Jump(i.InputRegister(0)); + } + frame_access_state()->ClearSPDelta(); + frame_access_state()->SetFrameAccessToDefault(); + break; + } +#endif // V8_ENABLE_WEBASSEMBLY + case kArchTailCallCodeObject: { + if (instr->InputAt(0)->IsImmediate()) { + __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); + } else { + Register reg = i.InputRegister(0); + DCHECK_IMPLIES( + instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), + reg == kJavaScriptCallCodeStartRegister); + __ Addu(reg, reg, Code::kHeaderSize - kHeapObjectTag); + __ Jump(reg); + } + frame_access_state()->ClearSPDelta(); + frame_access_state()->SetFrameAccessToDefault(); + break; + } + case kArchTailCallAddress: { + CHECK(!instr->InputAt(0)->IsImmediate()); + Register reg = i.InputRegister(0); + DCHECK_IMPLIES( + instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), + reg == kJavaScriptCallCodeStartRegister); + __ Jump(reg); + frame_access_state()->ClearSPDelta(); + frame_access_state()->SetFrameAccessToDefault(); + break; + } + case kArchCallJSFunction: { + Register func = i.InputRegister(0); + if (FLAG_debug_code) { + // Check the function's context matches the context argument. + __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); + __ Assert(eq, AbortReason::kWrongFunctionContext, cp, + Operand(kScratchReg)); + } + static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); + __ lw(a2, FieldMemOperand(func, JSFunction::kCodeOffset)); + __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag); + __ Call(a2); + RecordCallPosition(instr); + frame_access_state()->ClearSPDelta(); + frame_access_state()->SetFrameAccessToDefault(); + break; + } + case kArchPrepareCallCFunction: { + int const num_parameters = MiscField::decode(instr->opcode()); + __ PrepareCallCFunction(num_parameters, kScratchReg); + // Frame alignment requires using FP-relative frame addressing. + frame_access_state()->SetFrameAccessToFP(); + break; + } + case kArchSaveCallerRegisters: { + fp_mode_ = + static_cast(MiscField::decode(instr->opcode())); + DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore || + fp_mode_ == SaveFPRegsMode::kSave); + // kReturnRegister0 should have been saved before entering the stub. + int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0); + DCHECK(IsAligned(bytes, kSystemPointerSize)); + DCHECK_EQ(0, frame_access_state()->sp_delta()); + frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); + DCHECK(!caller_registers_saved_); + caller_registers_saved_ = true; + break; + } + case kArchRestoreCallerRegisters: { + DCHECK(fp_mode_ == + static_cast(MiscField::decode(instr->opcode()))); + DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore || + fp_mode_ == SaveFPRegsMode::kSave); + // Don't overwrite the returned value. + int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0); + frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize)); + DCHECK_EQ(0, frame_access_state()->sp_delta()); + DCHECK(caller_registers_saved_); + caller_registers_saved_ = false; + break; + } + case kArchPrepareTailCall: + AssemblePrepareTailCall(); + break; + case kArchCallCFunction: { + int const num_parameters = MiscField::decode(instr->opcode()); +#if V8_ENABLE_WEBASSEMBLY + Label start_call; + bool isWasmCapiFunction = + linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); + // from start_call to return address. + int offset = __ root_array_available() ? 
64 : 88; +#endif // V8_ENABLE_WEBASSEMBLY +#if V8_HOST_ARCH_MIPS + if (FLAG_debug_code) { + offset += 16; + } +#endif + +#if V8_ENABLE_WEBASSEMBLY + if (isWasmCapiFunction) { + // Put the return address in a stack slot. + __ mov(kScratchReg, ra); + __ bind(&start_call); + __ nal(); + __ nop(); + __ Addu(ra, ra, offset - 8); // 8 = nop + nal + __ sw(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ mov(ra, kScratchReg); + } +#endif // V8_ENABLE_WEBASSEMBLY + + if (instr->InputAt(0)->IsImmediate()) { + ExternalReference ref = i.InputExternalReference(0); + __ CallCFunction(ref, num_parameters); + } else { + Register func = i.InputRegister(0); + __ CallCFunction(func, num_parameters); + } + +#if V8_ENABLE_WEBASSEMBLY + if (isWasmCapiFunction) { + CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); + RecordSafepoint(instr->reference_map()); + } +#endif // V8_ENABLE_WEBASSEMBLY + + frame_access_state()->SetFrameAccessToDefault(); + // Ideally, we should decrement SP delta to match the change of stack + // pointer in CallCFunction. However, for certain architectures (e.g. + // ARM), there may be more strict alignment requirement, causing old SP + // to be saved on the stack. In those cases, we can not calculate the SP + // delta statically. + frame_access_state()->ClearSPDelta(); + if (caller_registers_saved_) { + // Need to re-sync SP delta introduced in kArchSaveCallerRegisters. + // Here, we assume the sequence to be: + // kArchSaveCallerRegisters; + // kArchCallCFunction; + // kArchRestoreCallerRegisters; + int bytes = + __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0); + frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); + } + break; + } + case kArchJmp: + AssembleArchJump(i.InputRpo(0)); + break; + case kArchBinarySearchSwitch: + AssembleArchBinarySearchSwitch(instr); + break; + case kArchTableSwitch: + AssembleArchTableSwitch(instr); + break; + case kArchAbortCSADcheck: + DCHECK(i.InputRegister(0) == a0); + { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. + FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), + RelocInfo::CODE_TARGET); + } + __ stop(); + break; + case kArchDebugBreak: + __ DebugBreak(); + break; + case kArchComment: + __ RecordComment(reinterpret_cast(i.InputInt32(0))); + break; + case kArchNop: + case kArchThrowTerminator: + // don't emit code for nops. 
+ break; + case kArchDeoptimize: { + DeoptimizationExit* exit = + BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore()); + __ Branch(exit->label()); + break; + } + case kArchRet: + AssembleReturn(instr->InputAt(0)); + break; + case kArchStackPointerGreaterThan: { + Register lhs_register = sp; + uint32_t offset; + if (ShouldApplyOffsetToStackCheck(instr, &offset)) { + lhs_register = i.TempRegister(1); + __ Subu(lhs_register, sp, offset); + } + __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register); + break; + } + case kArchStackCheckOffset: + __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset())); + break; + case kArchFramePointer: + __ mov(i.OutputRegister(), fp); + break; + case kArchParentFramePointer: + if (frame_access_state()->has_frame()) { + __ lw(i.OutputRegister(), MemOperand(fp, 0)); + } else { + __ mov(i.OutputRegister(), fp); + } + break; + case kArchTruncateDoubleToI: + __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), + i.InputDoubleRegister(0), DetermineStubCallMode()); + break; + case kArchStoreWithWriteBarrier: + case kArchAtomicStoreWithWriteBarrier: { + RecordWriteMode mode = + static_cast(MiscField::decode(instr->opcode())); + Register object = i.InputRegister(0); + Register index = i.InputRegister(1); + Register value = i.InputRegister(2); + Register scratch0 = i.TempRegister(0); + Register scratch1 = i.TempRegister(1); + auto ool = zone()->New(this, object, index, value, + scratch0, scratch1, mode, + DetermineStubCallMode()); + __ Addu(kScratchReg, object, index); + if (arch_opcode == kArchStoreWithWriteBarrier) { + __ sw(value, MemOperand(kScratchReg)); + } else { + DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode); + __ sync(); + __ sw(value, MemOperand(kScratchReg)); + __ sync(); + } + if (mode > RecordWriteMode::kValueIsPointer) { + __ JumpIfSmi(value, ool->exit()); + } + __ CheckPageFlag(object, scratch0, + MemoryChunk::kPointersFromHereAreInterestingMask, ne, + ool->entry()); + __ bind(ool->exit()); + break; + } + case kArchStackSlot: { + FrameOffset offset = + frame_access_state()->GetFrameOffset(i.InputInt32(0)); + Register base_reg = offset.from_stack_pointer() ? 
sp : fp; + __ Addu(i.OutputRegister(), base_reg, Operand(offset.offset())); + if (FLAG_debug_code > 0) { + // Verify that the output_register is properly aligned + __ And(kScratchReg, i.OutputRegister(), + Operand(kSystemPointerSize - 1)); + __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg, + Operand(zero_reg)); + } + break; + } + case kIeee754Float64Acos: + ASSEMBLE_IEEE754_UNOP(acos); + break; + case kIeee754Float64Acosh: + ASSEMBLE_IEEE754_UNOP(acosh); + break; + case kIeee754Float64Asin: + ASSEMBLE_IEEE754_UNOP(asin); + break; + case kIeee754Float64Asinh: + ASSEMBLE_IEEE754_UNOP(asinh); + break; + case kIeee754Float64Atan: + ASSEMBLE_IEEE754_UNOP(atan); + break; + case kIeee754Float64Atanh: + ASSEMBLE_IEEE754_UNOP(atanh); + break; + case kIeee754Float64Atan2: + ASSEMBLE_IEEE754_BINOP(atan2); + break; + case kIeee754Float64Cos: + ASSEMBLE_IEEE754_UNOP(cos); + break; + case kIeee754Float64Cosh: + ASSEMBLE_IEEE754_UNOP(cosh); + break; + case kIeee754Float64Cbrt: + ASSEMBLE_IEEE754_UNOP(cbrt); + break; + case kIeee754Float64Exp: + ASSEMBLE_IEEE754_UNOP(exp); + break; + case kIeee754Float64Expm1: + ASSEMBLE_IEEE754_UNOP(expm1); + break; + case kIeee754Float64Log: + ASSEMBLE_IEEE754_UNOP(log); + break; + case kIeee754Float64Log1p: + ASSEMBLE_IEEE754_UNOP(log1p); + break; + case kIeee754Float64Log10: + ASSEMBLE_IEEE754_UNOP(log10); + break; + case kIeee754Float64Log2: + ASSEMBLE_IEEE754_UNOP(log2); + break; + case kIeee754Float64Pow: + ASSEMBLE_IEEE754_BINOP(pow); + break; + case kIeee754Float64Sin: + ASSEMBLE_IEEE754_UNOP(sin); + break; + case kIeee754Float64Sinh: + ASSEMBLE_IEEE754_UNOP(sinh); + break; + case kIeee754Float64Tan: + ASSEMBLE_IEEE754_UNOP(tan); + break; + case kIeee754Float64Tanh: + ASSEMBLE_IEEE754_UNOP(tanh); + break; + case kMipsAdd: + __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsAddOvf: + __ AddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), + kScratchReg); + break; + case kMipsSub: + __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsSubOvf: + __ SubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), + kScratchReg); + break; + case kMipsMul: + __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsMulOvf: + __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), + kScratchReg); + break; + case kMipsMulHigh: + __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsMulHighU: + __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsDiv: + __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + if (IsMipsArchVariant(kMips32r6)) { + __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + } else { + __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); + } + break; + case kMipsDivU: + __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + if (IsMipsArchVariant(kMips32r6)) { + __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + } else { + __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); + } + break; + case kMipsMod: + __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsModU: + __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsAnd: + __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsOr: + __ Or(i.OutputRegister(), 
i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsNor: + if (instr->InputAt(1)->IsRegister()) { + __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + } else { + DCHECK_EQ(0, i.InputOperand(1).immediate()); + __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); + } + break; + case kMipsXor: + __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsClz: + __ Clz(i.OutputRegister(), i.InputRegister(0)); + break; + case kMipsCtz: { + Register src = i.InputRegister(0); + Register dst = i.OutputRegister(); + __ Ctz(dst, src); + } break; + case kMipsPopcnt: { + Register src = i.InputRegister(0); + Register dst = i.OutputRegister(); + __ Popcnt(dst, src); + } break; + case kMipsShl: + if (instr->InputAt(1)->IsRegister()) { + __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + } else { + int32_t imm = i.InputOperand(1).immediate(); + __ sll(i.OutputRegister(), i.InputRegister(0), imm); + } + break; + case kMipsShr: + if (instr->InputAt(1)->IsRegister()) { + __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + } else { + int32_t imm = i.InputOperand(1).immediate(); + __ srl(i.OutputRegister(), i.InputRegister(0), imm); + } + break; + case kMipsSar: + if (instr->InputAt(1)->IsRegister()) { + __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + } else { + int32_t imm = i.InputOperand(1).immediate(); + __ sra(i.OutputRegister(), i.InputRegister(0), imm); + } + break; + case kMipsShlPair: { + Register second_output = + instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0); + if (instr->InputAt(2)->IsRegister()) { + __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0), + i.InputRegister(1), i.InputRegister(2), kScratchReg, + kScratchReg2); + } else { + uint32_t imm = i.InputOperand(2).immediate(); + __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0), + i.InputRegister(1), imm, kScratchReg); + } + } break; + case kMipsShrPair: { + Register second_output = + instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0); + if (instr->InputAt(2)->IsRegister()) { + __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0), + i.InputRegister(1), i.InputRegister(2), kScratchReg, + kScratchReg2); + } else { + uint32_t imm = i.InputOperand(2).immediate(); + __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0), + i.InputRegister(1), imm, kScratchReg); + } + } break; + case kMipsSarPair: { + Register second_output = + instr->OutputCount() >= 2 ? 
i.OutputRegister(1) : i.TempRegister(0); + if (instr->InputAt(2)->IsRegister()) { + __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0), + i.InputRegister(1), i.InputRegister(2), kScratchReg, + kScratchReg2); + } else { + uint32_t imm = i.InputOperand(2).immediate(); + __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0), + i.InputRegister(1), imm, kScratchReg); + } + } break; + case kMipsExt: + __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), + i.InputInt8(2)); + break; + case kMipsIns: + if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { + __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2)); + } else { + __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), + i.InputInt8(2)); + } + break; + case kMipsRor: + __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsTst: + __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1)); + break; + case kMipsCmp: + // Pseudo-instruction used for cmp/branch. No opcode emitted here. + break; + case kMipsMov: + // TODO(plind): Should we combine mov/li like this, or use separate instr? + // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType + if (HasRegisterInput(instr, 0)) { + __ mov(i.OutputRegister(), i.InputRegister(0)); + } else { + __ li(i.OutputRegister(), i.InputOperand(0)); + } + break; + case kMipsLsa: + DCHECK(instr->InputAt(2)->IsImmediate()); + __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), + i.InputInt8(2)); + break; + case kMipsCmpS: { + FPURegister left = i.InputOrZeroSingleRegister(0); + FPURegister right = i.InputOrZeroSingleRegister(1); + bool predicate; + FPUCondition cc = + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); + + if ((left == kDoubleRegZero || right == kDoubleRegZero) && + !__ IsDoubleZeroRegSet()) { + __ Move(kDoubleRegZero, 0.0); + } + + __ CompareF32(cc, left, right); + } break; + case kMipsAddS: + // TODO(plind): add special case: combine mult & add. + __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsSubS: + __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsMulS: + // TODO(plind): add special case: right op is -1.0, see arm port. 
+ __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsDivS: + __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsAbsS: + if (IsMipsArchVariant(kMips32r6)) { + __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); + } else { + __ mfc1(kScratchReg, i.InputSingleRegister(0)); + __ Ins(kScratchReg, zero_reg, 31, 1); + __ mtc1(kScratchReg, i.OutputSingleRegister()); + } + break; + case kMipsSqrtS: { + __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + } + case kMipsMaxS: + __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsMinS: + __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsCmpD: { + FPURegister left = i.InputOrZeroDoubleRegister(0); + FPURegister right = i.InputOrZeroDoubleRegister(1); + bool predicate; + FPUCondition cc = + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); + if ((left == kDoubleRegZero || right == kDoubleRegZero) && + !__ IsDoubleZeroRegSet()) { + __ Move(kDoubleRegZero, 0.0); + } + __ CompareF64(cc, left, right); + } break; + case kMipsAddPair: + __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0), + i.InputRegister(1), i.InputRegister(2), i.InputRegister(3), + kScratchReg, kScratchReg2); + break; + case kMipsSubPair: + __ SubPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0), + i.InputRegister(1), i.InputRegister(2), i.InputRegister(3), + kScratchReg, kScratchReg2); + break; + case kMipsMulPair: { + __ MulPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0), + i.InputRegister(1), i.InputRegister(2), i.InputRegister(3), + kScratchReg, kScratchReg2); + } break; + case kMipsAddD: + // TODO(plind): add special case: combine mult & add. + __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsSubD: + __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsMaddS: + __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0), + i.InputFloatRegister(1), i.InputFloatRegister(2), + kScratchDoubleReg); + break; + case kMipsMaddD: + __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1), i.InputDoubleRegister(2), + kScratchDoubleReg); + break; + case kMipsMsubS: + __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0), + i.InputFloatRegister(1), i.InputFloatRegister(2), + kScratchDoubleReg); + break; + case kMipsMsubD: + __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1), i.InputDoubleRegister(2), + kScratchDoubleReg); + break; + case kMipsMulD: + // TODO(plind): add special case: right op is -1.0, see arm port. + __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsDivD: + __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsModD: { + // TODO(bmeurer): We should really get rid of this special instruction, + // and generate a CallAddress instruction instead. 
+ FrameScope scope(tasm(), StackFrame::MANUAL); + __ PrepareCallCFunction(0, 2, kScratchReg); + __ MovToFloatParameters(i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); + // Move the result in the double result register. + __ MovFromFloatResult(i.OutputDoubleRegister()); + break; + } + case kMipsAbsD: { + FPURegister src = i.InputDoubleRegister(0); + FPURegister dst = i.OutputDoubleRegister(); + if (IsMipsArchVariant(kMips32r6)) { + __ abs_d(dst, src); + } else { + __ Move(dst, src); + __ mfhc1(kScratchReg, src); + __ Ins(kScratchReg, zero_reg, 31, 1); + __ mthc1(kScratchReg, dst); + } + break; + } + case kMipsNegS: + __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); + break; + case kMipsNegD: + __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + case kMipsSqrtD: { + __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + } + case kMipsMaxD: + __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsMinD: + __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kMipsFloat64RoundDown: { + __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + } + case kMipsFloat32RoundDown: { + __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); + break; + } + case kMipsFloat64RoundTruncate: { + __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + } + case kMipsFloat32RoundTruncate: { + __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); + break; + } + case kMipsFloat64RoundUp: { + __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + } + case kMipsFloat32RoundUp: { + __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); + break; + } + case kMipsFloat64RoundTiesEven: { + __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + } + case kMipsFloat32RoundTiesEven: { + __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); + break; + } + case kMipsFloat32Max: { + FPURegister dst = i.OutputSingleRegister(); + FPURegister src1 = i.InputSingleRegister(0); + FPURegister src2 = i.InputSingleRegister(1); + auto ool = zone()->New(this, dst, src1, src2); + __ Float32Max(dst, src1, src2, ool->entry()); + __ bind(ool->exit()); + break; + } + case kMipsFloat64Max: { + DoubleRegister dst = i.OutputDoubleRegister(); + DoubleRegister src1 = i.InputDoubleRegister(0); + DoubleRegister src2 = i.InputDoubleRegister(1); + auto ool = zone()->New(this, dst, src1, src2); + __ Float64Max(dst, src1, src2, ool->entry()); + __ bind(ool->exit()); + break; + } + case kMipsFloat32Min: { + FPURegister dst = i.OutputSingleRegister(); + FPURegister src1 = i.InputSingleRegister(0); + FPURegister src2 = i.InputSingleRegister(1); + auto ool = zone()->New(this, dst, src1, src2); + __ Float32Min(dst, src1, src2, ool->entry()); + __ bind(ool->exit()); + break; + } + case kMipsFloat64Min: { + DoubleRegister dst = i.OutputDoubleRegister(); + DoubleRegister src1 = i.InputDoubleRegister(0); + DoubleRegister src2 = i.InputDoubleRegister(1); + auto ool = zone()->New(this, dst, src1, src2); + __ Float64Min(dst, src1, src2, ool->entry()); + __ bind(ool->exit()); + break; + } + case kMipsCvtSD: { + __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); + break; + } + case kMipsCvtDS: { + __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); + break; + } + case 
kMipsCvtDW: { + FPURegister scratch = kScratchDoubleReg; + __ mtc1(i.InputRegister(0), scratch); + __ cvt_d_w(i.OutputDoubleRegister(), scratch); + break; + } + case kMipsCvtSW: { + FPURegister scratch = kScratchDoubleReg; + __ mtc1(i.InputRegister(0), scratch); + __ cvt_s_w(i.OutputDoubleRegister(), scratch); + break; + } + case kMipsCvtSUw: { + FPURegister scratch = kScratchDoubleReg; + __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch); + __ cvt_s_d(i.OutputDoubleRegister(), i.OutputDoubleRegister()); + break; + } + case kMipsCvtDUw: { + FPURegister scratch = kScratchDoubleReg; + __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch); + break; + } + case kMipsFloorWD: { + FPURegister scratch = kScratchDoubleReg; + __ Floor_w_d(scratch, i.InputDoubleRegister(0)); + __ mfc1(i.OutputRegister(), scratch); + break; + } + case kMipsCeilWD: { + FPURegister scratch = kScratchDoubleReg; + __ Ceil_w_d(scratch, i.InputDoubleRegister(0)); + __ mfc1(i.OutputRegister(), scratch); + break; + } + case kMipsRoundWD: { + FPURegister scratch = kScratchDoubleReg; + __ Round_w_d(scratch, i.InputDoubleRegister(0)); + __ mfc1(i.OutputRegister(), scratch); + break; + } + case kMipsTruncWD: { + FPURegister scratch = kScratchDoubleReg; + // Other arches use round to zero here, so we follow. + __ Trunc_w_d(scratch, i.InputDoubleRegister(0)); + __ mfc1(i.OutputRegister(), scratch); + break; + } + case kMipsFloorWS: { + FPURegister scratch = kScratchDoubleReg; + __ floor_w_s(scratch, i.InputDoubleRegister(0)); + __ mfc1(i.OutputRegister(), scratch); + break; + } + case kMipsCeilWS: { + FPURegister scratch = kScratchDoubleReg; + __ ceil_w_s(scratch, i.InputDoubleRegister(0)); + __ mfc1(i.OutputRegister(), scratch); + break; + } + case kMipsRoundWS: { + FPURegister scratch = kScratchDoubleReg; + __ round_w_s(scratch, i.InputDoubleRegister(0)); + __ mfc1(i.OutputRegister(), scratch); + break; + } + case kMipsTruncWS: { + FPURegister scratch = kScratchDoubleReg; + __ trunc_w_s(scratch, i.InputDoubleRegister(0)); + __ mfc1(i.OutputRegister(), scratch); + // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, + // because INT32_MIN allows easier out-of-bounds detection. + bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode()); + if (set_overflow_to_min_i32) { + __ Addu(kScratchReg, i.OutputRegister(), 1); + __ Slt(kScratchReg2, kScratchReg, i.OutputRegister()); + __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2); + } + break; + } + case kMipsTruncUwD: { + FPURegister scratch = kScratchDoubleReg; + __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch); + break; + } + case kMipsTruncUwS: { + FPURegister scratch = kScratchDoubleReg; + __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch); + // Avoid UINT32_MAX as an overflow indicator and use 0 instead, + // because 0 allows easier out-of-bounds detection. 
+ bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode()); + if (set_overflow_to_min_i32) { + __ Addu(kScratchReg, i.OutputRegister(), 1); + __ Movz(i.OutputRegister(), zero_reg, kScratchReg); + } + break; + } + case kMipsFloat64ExtractLowWord32: + __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0)); + break; + case kMipsFloat64ExtractHighWord32: + __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0)); + break; + case kMipsFloat64InsertLowWord32: + __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1)); + break; + case kMipsFloat64InsertHighWord32: + __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1)); + break; + case kMipsFloat64SilenceNaN: + __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + + // ... more basic instructions ... + case kMipsSeb: + __ Seb(i.OutputRegister(), i.InputRegister(0)); + break; + case kMipsSeh: + __ Seh(i.OutputRegister(), i.InputRegister(0)); + break; + case kMipsLbu: + __ lbu(i.OutputRegister(), i.MemoryOperand()); + break; + case kMipsLb: + __ lb(i.OutputRegister(), i.MemoryOperand()); + break; + case kMipsSb: + __ sb(i.InputOrZeroRegister(2), i.MemoryOperand()); + break; + case kMipsLhu: + __ lhu(i.OutputRegister(), i.MemoryOperand()); + break; + case kMipsUlhu: + __ Ulhu(i.OutputRegister(), i.MemoryOperand()); + break; + case kMipsLh: + __ lh(i.OutputRegister(), i.MemoryOperand()); + break; + case kMipsUlh: + __ Ulh(i.OutputRegister(), i.MemoryOperand()); + break; + case kMipsSh: + __ sh(i.InputOrZeroRegister(2), i.MemoryOperand()); + break; + case kMipsUsh: + __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg); + break; + case kMipsLw: + __ lw(i.OutputRegister(), i.MemoryOperand()); + break; + case kMipsUlw: + __ Ulw(i.OutputRegister(), i.MemoryOperand()); + break; + case kMipsSw: + __ sw(i.InputOrZeroRegister(2), i.MemoryOperand()); + break; + case kMipsUsw: + __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand()); + break; + case kMipsLwc1: { + __ lwc1(i.OutputSingleRegister(), i.MemoryOperand()); + break; + } + case kMipsUlwc1: { + __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg); + break; + } + case kMipsSwc1: { + size_t index = 0; + MemOperand operand = i.MemoryOperand(&index); + FPURegister ft = i.InputOrZeroSingleRegister(index); + if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { + __ Move(kDoubleRegZero, 0.0); + } + __ swc1(ft, operand); + break; + } + case kMipsUswc1: { + size_t index = 0; + MemOperand operand = i.MemoryOperand(&index); + FPURegister ft = i.InputOrZeroSingleRegister(index); + if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { + __ Move(kDoubleRegZero, 0.0); + } + __ Uswc1(ft, operand, kScratchReg); + break; + } + case kMipsLdc1: + __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand()); + break; + case kMipsUldc1: + __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg); + break; + case kMipsSdc1: { + FPURegister ft = i.InputOrZeroDoubleRegister(2); + if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { + __ Move(kDoubleRegZero, 0.0); + } + __ Sdc1(ft, i.MemoryOperand()); + break; + } + case kMipsUsdc1: { + FPURegister ft = i.InputOrZeroDoubleRegister(2); + if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { + __ Move(kDoubleRegZero, 0.0); + } + __ Usdc1(ft, i.MemoryOperand(), kScratchReg); + break; + } + case kMipsSync: { + __ sync(); + break; + } + case kMipsPush: + if (instr->InputAt(0)->IsFPRegister()) { + LocationOperand* op = LocationOperand::cast(instr->InputAt(0)); + switch (op->representation()) { + 
case MachineRepresentation::kFloat32: + __ swc1(i.InputFloatRegister(0), MemOperand(sp, -kFloatSize)); + __ Subu(sp, sp, Operand(kFloatSize)); + frame_access_state()->IncreaseSPDelta(kFloatSize / + kSystemPointerSize); + break; + case MachineRepresentation::kFloat64: + __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize)); + __ Subu(sp, sp, Operand(kDoubleSize)); + frame_access_state()->IncreaseSPDelta(kDoubleSize / + kSystemPointerSize); + break; + default: { + UNREACHABLE(); + } + } + } else { + __ Push(i.InputRegister(0)); + frame_access_state()->IncreaseSPDelta(1); + } + break; + case kMipsPeek: { + int reverse_slot = i.InputInt32(0); + int offset = + FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot); + if (instr->OutputAt(0)->IsFPRegister()) { + LocationOperand* op = LocationOperand::cast(instr->OutputAt(0)); + if (op->representation() == MachineRepresentation::kFloat64) { + __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset)); + } else if (op->representation() == MachineRepresentation::kFloat32) { + __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset)); + } else { + DCHECK_EQ(op->representation(), MachineRepresentation::kSimd128); + __ ld_b(i.OutputSimd128Register(), MemOperand(fp, offset)); + } + } else { + __ lw(i.OutputRegister(0), MemOperand(fp, offset)); + } + break; + } + case kMipsStackClaim: { + __ Subu(sp, sp, Operand(i.InputInt32(0))); + frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / + kSystemPointerSize); + break; + } + case kMipsStoreToStackSlot: { + if (instr->InputAt(0)->IsFPRegister()) { + LocationOperand* op = LocationOperand::cast(instr->InputAt(0)); + if (op->representation() == MachineRepresentation::kFloat64) { + __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1))); + } else if (op->representation() == MachineRepresentation::kFloat32) { + __ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1))); + } else { + DCHECK_EQ(MachineRepresentation::kSimd128, op->representation()); + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1))); + } + } else { + __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1))); + } + break; + } + case kMipsByteSwap32: { + __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4); + break; + } + case kMipsS128Load8Splat: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ lb(kScratchReg, i.MemoryOperand()); + __ fill_b(i.OutputSimd128Register(), kScratchReg); + break; + } + case kMipsS128Load16Splat: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ lh(kScratchReg, i.MemoryOperand()); + __ fill_h(i.OutputSimd128Register(), kScratchReg); + break; + } + case kMipsS128Load32Splat: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ Lw(kScratchReg, i.MemoryOperand()); + __ fill_w(i.OutputSimd128Register(), kScratchReg); + break; + } + case kMipsS128Load64Splat: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + MemOperand memLow = i.MemoryOperand(); + MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); + __ Lw(kScratchReg, memLow); + __ fill_w(dst, kScratchReg); + __ Lw(kScratchReg, memHigh); + __ fill_w(kSimd128ScratchReg, kScratchReg); + __ ilvr_w(dst, kSimd128ScratchReg, dst); + break; + } + case kMipsS128Load8x8S: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + MemOperand memLow = i.MemoryOperand(); + MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); + __ 
Lw(kScratchReg, memLow); + __ fill_w(dst, kScratchReg); + __ Lw(kScratchReg, memHigh); + __ fill_w(kSimd128ScratchReg, kScratchReg); + __ ilvr_w(dst, kSimd128ScratchReg, dst); + __ clti_s_b(kSimd128ScratchReg, dst, 0); + __ ilvr_b(dst, kSimd128ScratchReg, dst); + break; + } + case kMipsS128Load8x8U: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + MemOperand memLow = i.MemoryOperand(); + MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); + __ Lw(kScratchReg, memLow); + __ fill_w(dst, kScratchReg); + __ Lw(kScratchReg, memHigh); + __ fill_w(kSimd128ScratchReg, kScratchReg); + __ ilvr_w(dst, kSimd128ScratchReg, dst); + __ ilvr_b(dst, kSimd128RegZero, dst); + break; + } + case kMipsS128Load16x4S: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + MemOperand memLow = i.MemoryOperand(); + MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); + __ Lw(kScratchReg, memLow); + __ fill_w(dst, kScratchReg); + __ Lw(kScratchReg, memHigh); + __ fill_w(kSimd128ScratchReg, kScratchReg); + __ ilvr_w(dst, kSimd128ScratchReg, dst); + __ clti_s_h(kSimd128ScratchReg, dst, 0); + __ ilvr_h(dst, kSimd128ScratchReg, dst); + break; + } + case kMipsS128Load16x4U: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + MemOperand memLow = i.MemoryOperand(); + MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); + __ Lw(kScratchReg, memLow); + __ fill_w(dst, kScratchReg); + __ Lw(kScratchReg, memHigh); + __ fill_w(kSimd128ScratchReg, kScratchReg); + __ ilvr_w(dst, kSimd128ScratchReg, dst); + __ ilvr_h(dst, kSimd128RegZero, dst); + break; + } + case kMipsS128Load32x2S: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + MemOperand memLow = i.MemoryOperand(); + MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); + __ Lw(kScratchReg, memLow); + __ fill_w(dst, kScratchReg); + __ Lw(kScratchReg, memHigh); + __ fill_w(kSimd128ScratchReg, kScratchReg); + __ ilvr_w(dst, kSimd128ScratchReg, dst); + __ clti_s_w(kSimd128ScratchReg, dst, 0); + __ ilvr_w(dst, kSimd128ScratchReg, dst); + break; + } + case kMipsS128Load32x2U: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + MemOperand memLow = i.MemoryOperand(); + MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); + __ Lw(kScratchReg, memLow); + __ fill_w(dst, kScratchReg); + __ Lw(kScratchReg, memHigh); + __ fill_w(kSimd128ScratchReg, kScratchReg); + __ ilvr_w(dst, kSimd128ScratchReg, dst); + __ ilvr_w(dst, kSimd128RegZero, dst); + break; + } + case kAtomicLoadInt8: + ASSEMBLE_ATOMIC_LOAD_INTEGER(lb); + break; + case kAtomicLoadUint8: + ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu); + break; + case kAtomicLoadInt16: + ASSEMBLE_ATOMIC_LOAD_INTEGER(lh); + break; + case kAtomicLoadUint16: + ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu); + break; + case kAtomicLoadWord32: + ASSEMBLE_ATOMIC_LOAD_INTEGER(lw); + break; + case kAtomicStoreWord8: + ASSEMBLE_ATOMIC_STORE_INTEGER(sb); + break; + case kAtomicStoreWord16: + ASSEMBLE_ATOMIC_STORE_INTEGER(sh); + break; + case kAtomicStoreWord32: + ASSEMBLE_ATOMIC_STORE_INTEGER(sw); + break; + case kAtomicExchangeInt8: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8); + break; + case kAtomicExchangeUint8: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8); + break; + case kAtomicExchangeInt16: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16); + 
break; + case kAtomicExchangeUint16: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16); + break; + case kAtomicExchangeWord32: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(); + break; + case kAtomicCompareExchangeInt8: + ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8); + break; + case kAtomicCompareExchangeUint8: + ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8); + break; + case kAtomicCompareExchangeInt16: + ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16); + break; + case kAtomicCompareExchangeUint16: + ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16); + break; + case kAtomicCompareExchangeWord32: + ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(); + break; +#define ATOMIC_BINOP_CASE(op, inst) \ + case kAtomic##op##Int8: \ + ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \ + break; \ + case kAtomic##op##Uint8: \ + ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \ + break; \ + case kAtomic##op##Int16: \ + ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \ + break; \ + case kAtomic##op##Uint16: \ + ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \ + break; \ + case kAtomic##op##Word32: \ + ASSEMBLE_ATOMIC_BINOP(inst); \ + break; + ATOMIC_BINOP_CASE(Add, Addu) + ATOMIC_BINOP_CASE(Sub, Subu) + ATOMIC_BINOP_CASE(And, And) + ATOMIC_BINOP_CASE(Or, Or) + ATOMIC_BINOP_CASE(Xor, Xor) +#undef ATOMIC_BINOP_CASE + case kMipsWord32AtomicPairLoad: { + if (IsMipsArchVariant(kMips32r6)) { + if (instr->OutputCount() > 0) { + Register second_output = instr->OutputCount() == 2 + ? i.OutputRegister(1) + : i.TempRegister(1); + __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); + __ llx(second_output, MemOperand(a0, 4)); + __ ll(i.OutputRegister(0), MemOperand(a0, 0)); + __ sync(); + } + } else { + FrameScope scope(tasm(), StackFrame::MANUAL); + __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); + __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); + __ PrepareCallCFunction(1, 0, kScratchReg); + __ CallCFunction(ExternalReference::atomic_pair_load_function(), 1, 0); + __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); + } + break; + } + case kMipsWord32AtomicPairStore: { + if (IsMipsArchVariant(kMips32r6)) { + Label store; + __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); + __ sync(); + __ bind(&store); + __ llx(i.TempRegister(2), MemOperand(a0, 4)); + __ ll(i.TempRegister(1), MemOperand(a0, 0)); + __ Move(i.TempRegister(1), i.InputRegister(2)); + __ scx(i.InputRegister(3), MemOperand(a0, 4)); + __ sc(i.TempRegister(1), MemOperand(a0, 0)); + __ BranchShort(&store, eq, i.TempRegister(1), Operand(zero_reg)); + __ sync(); + } else { + FrameScope scope(tasm(), StackFrame::MANUAL); + __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); + __ PushCallerSaved(SaveFPRegsMode::kIgnore); + __ PrepareCallCFunction(3, 0, kScratchReg); + __ CallCFunction(ExternalReference::atomic_pair_store_function(), 3, 0); + __ PopCallerSaved(SaveFPRegsMode::kIgnore); + } + break; + } +#define ATOMIC64_BINOP_ARITH_CASE(op, instr, external) \ + case kMipsWord32AtomicPair##op: \ + ASSEMBLE_ATOMIC64_ARITH_BINOP(instr, external); \ + break; + ATOMIC64_BINOP_ARITH_CASE(Add, AddPair, atomic_pair_add_function) + ATOMIC64_BINOP_ARITH_CASE(Sub, SubPair, atomic_pair_sub_function) +#undef ATOMIC64_BINOP_ARITH_CASE +#define ATOMIC64_BINOP_LOGIC_CASE(op, instr, external) \ + case kMipsWord32AtomicPair##op: \ + ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr, external); \ + break; + ATOMIC64_BINOP_LOGIC_CASE(And, AndPair, atomic_pair_and_function) + ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair, atomic_pair_or_function) + ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair, 
atomic_pair_xor_function) +#undef ATOMIC64_BINOP_LOGIC_CASE + case kMipsWord32AtomicPairExchange: + if (IsMipsArchVariant(kMips32r6)) { + Label binop; + Register oldval_low = + instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); + Register oldval_high = + instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); + __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); + __ sync(); + __ bind(&binop); + __ llx(oldval_high, MemOperand(i.TempRegister(0), 4)); + __ ll(oldval_low, MemOperand(i.TempRegister(0), 0)); + __ Move(i.TempRegister(1), i.InputRegister(2)); + __ scx(i.InputRegister(3), MemOperand(i.TempRegister(0), 4)); + __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); + __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); + __ sync(); + } else { + FrameScope scope(tasm(), StackFrame::MANUAL); + __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); + __ PrepareCallCFunction(3, 0, kScratchReg); + __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); + __ CallCFunction(ExternalReference::atomic_pair_exchange_function(), 3, + 0); + __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); + } + break; + case kMipsWord32AtomicPairCompareExchange: { + if (IsMipsArchVariant(kMips32r6)) { + Label compareExchange, exit; + Register oldval_low = + instr->OutputCount() >= 1 ? i.OutputRegister(0) : kScratchReg; + Register oldval_high = + instr->OutputCount() >= 2 ? i.OutputRegister(1) : kScratchReg2; + __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); + __ sync(); + __ bind(&compareExchange); + __ llx(oldval_high, MemOperand(i.TempRegister(0), 4)); + __ ll(oldval_low, MemOperand(i.TempRegister(0), 0)); + __ BranchShort(&exit, ne, i.InputRegister(2), Operand(oldval_low)); + __ BranchShort(&exit, ne, i.InputRegister(3), Operand(oldval_high)); + __ mov(kScratchReg, i.InputRegister(4)); + __ scx(i.InputRegister(5), MemOperand(i.TempRegister(0), 4)); + __ sc(kScratchReg, MemOperand(i.TempRegister(0), 0)); + __ BranchShort(&compareExchange, eq, kScratchReg, Operand(zero_reg)); + __ bind(&exit); + __ sync(); + } else { + FrameScope scope(tasm(), StackFrame::MANUAL); + __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); + __ PrepareCallCFunction(5, 0, kScratchReg); + __ addu(a0, i.InputRegister(0), i.InputRegister(1)); + __ sw(i.InputRegister(5), MemOperand(sp, 16)); + __ CallCFunction( + ExternalReference::atomic_pair_compare_exchange_function(), 5, 0); + __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); + } + break; + } + case kMipsS128Zero: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(), + i.OutputSimd128Register()); + break; + } + case kMipsI32x4Splat: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fill_w(i.OutputSimd128Register(), i.InputRegister(0)); + break; + } + case kMipsI32x4ExtractLane: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0), + i.InputInt8(1)); + break; + } + case kMipsI32x4ReplaceLane: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register src = i.InputSimd128Register(0); + Simd128Register dst = i.OutputSimd128Register(); + if (src != dst) { + __ move_v(dst, src); + } + __ insert_w(dst, i.InputInt8(1), i.InputRegister(2)); + break; + } + case kMipsI32x4Add: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI32x4Sub: { + CpuFeatureScope 
msa_scope(tasm(), MIPS_SIMD); + __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI32x4ExtAddPairwiseI16x8S: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ hadd_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(0)); + break; + } + case kMipsI32x4ExtAddPairwiseI16x8U: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ hadd_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(0)); + break; + } + case kMipsF64x2Abs: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63); + break; + } + case kMipsF64x2Neg: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ bnegi_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63); + break; + } + case kMipsF64x2Sqrt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fsqrt_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kMipsF64x2Add: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + ASSEMBLE_F64X2_ARITHMETIC_BINOP(fadd_d); + break; + } + case kMipsF64x2Sub: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + ASSEMBLE_F64X2_ARITHMETIC_BINOP(fsub_d); + break; + } + case kMipsF64x2Mul: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmul_d); + break; + } + case kMipsF64x2Div: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + ASSEMBLE_F64X2_ARITHMETIC_BINOP(fdiv_d); + break; + } + case kMipsF64x2Min: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmin_d); + break; + } + case kMipsF64x2Max: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmax_d); + break; + } + case kMipsF64x2Eq: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF64x2Ne: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fcne_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF64x2Lt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fclt_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF64x2Le: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fcle_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF64x2Splat: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + __ FmoveLow(kScratchReg, i.InputDoubleRegister(0)); + __ insert_w(dst, 0, kScratchReg); + __ insert_w(dst, 2, kScratchReg); + __ FmoveHigh(kScratchReg, i.InputDoubleRegister(0)); + __ insert_w(dst, 1, kScratchReg); + __ insert_w(dst, 3, kScratchReg); + break; + } + case kMipsF64x2ExtractLane: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1) * 2); + __ FmoveLow(i.OutputDoubleRegister(), kScratchReg); + __ copy_u_w(kScratchReg, i.InputSimd128Register(0), + i.InputInt8(1) * 2 + 1); + __ FmoveHigh(i.OutputDoubleRegister(), kScratchReg); + break; + } + case kMipsF64x2ReplaceLane: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register src = i.InputSimd128Register(0); + Simd128Register dst = i.OutputSimd128Register(); + if (src != dst) { + __ move_v(dst, src); + } + __ FmoveLow(kScratchReg, i.InputDoubleRegister(2)); + __ insert_w(dst, 
i.InputInt8(1) * 2, kScratchReg); + __ FmoveHigh(kScratchReg, i.InputDoubleRegister(2)); + __ insert_w(dst, i.InputInt8(1) * 2 + 1, kScratchReg); + break; + } + case kMipsF64x2Pmin: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register lhs = i.InputSimd128Register(0); + Simd128Register rhs = i.InputSimd128Register(1); + // dst = rhs < lhs ? rhs : lhs + __ fclt_d(dst, rhs, lhs); + __ bsel_v(dst, lhs, rhs); + break; + } + case kMipsF64x2Pmax: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register lhs = i.InputSimd128Register(0); + Simd128Register rhs = i.InputSimd128Register(1); + // dst = lhs < rhs ? rhs : lhs + __ fclt_d(dst, lhs, rhs); + __ bsel_v(dst, lhs, rhs); + break; + } + case kMipsF64x2Ceil: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cfcmsa(kScratchReg, MSACSR); + __ li(kScratchReg2, kRoundToPlusInf); + __ ctcmsa(MSACSR, kScratchReg2); + __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ ctcmsa(MSACSR, kScratchReg); + break; + } + case kMipsF64x2Floor: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cfcmsa(kScratchReg, MSACSR); + __ li(kScratchReg2, kRoundToMinusInf); + __ ctcmsa(MSACSR, kScratchReg2); + __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ ctcmsa(MSACSR, kScratchReg); + break; + } + case kMipsF64x2Trunc: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cfcmsa(kScratchReg, MSACSR); + __ li(kScratchReg2, kRoundToZero); + __ ctcmsa(MSACSR, kScratchReg2); + __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ ctcmsa(MSACSR, kScratchReg); + break; + } + case kMipsF64x2NearestInt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cfcmsa(kScratchReg, MSACSR); + // kRoundToNearest == 0 + __ ctcmsa(MSACSR, zero_reg); + __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ ctcmsa(MSACSR, kScratchReg); + break; + } + case kMipsF64x2ConvertLowI32x4S: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0)); + __ slli_d(kSimd128RegZero, kSimd128RegZero, 32); + __ srai_d(kSimd128RegZero, kSimd128RegZero, 32); + __ ffint_s_d(i.OutputSimd128Register(), kSimd128RegZero); + break; + } + case kMipsF64x2ConvertLowI32x4U: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0)); + __ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero); + break; + } + case kMipsF64x2PromoteLowF32x4: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kMipsI64x2Add: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI64x2Sub: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ subv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI64x2Mul: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ mulv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI64x2Neg: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ 
subv_d(i.OutputSimd128Register(), kSimd128RegZero, + i.InputSimd128Register(0)); + break; + } + case kMipsI64x2Shl: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ slli_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt6(1)); + break; + } + case kMipsI64x2ShrS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ srai_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt6(1)); + break; + } + case kMipsI64x2ShrU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ srli_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt6(1)); + break; + } + case kMipsI64x2BitMask: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Register dst = i.OutputRegister(); + Simd128Register src = i.InputSimd128Register(0); + Simd128Register scratch0 = kSimd128RegZero; + Simd128Register scratch1 = kSimd128ScratchReg; + __ srli_d(scratch0, src, 63); + __ shf_w(scratch1, scratch0, 0x02); + __ slli_d(scratch1, scratch1, 1); + __ or_v(scratch0, scratch0, scratch1); + __ copy_u_b(dst, scratch0, 0); + break; + } + case kMipsI64x2Eq: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI64x2Ne: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(), + i.OutputSimd128Register()); + break; + } + case kMipsI64x2GtS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI64x2GeS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI64x2Abs: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ adds_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + kSimd128RegZero); + break; + } + case kMipsI64x2SConvertI32x4Low: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register src = i.InputSimd128Register(0); + __ ilvr_w(kSimd128ScratchReg, src, src); + __ slli_d(dst, kSimd128ScratchReg, 32); + __ srai_d(dst, dst, 32); + break; + } + case kMipsI64x2SConvertI32x4High: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register src = i.InputSimd128Register(0); + __ ilvl_w(kSimd128ScratchReg, src, src); + __ slli_d(dst, kSimd128ScratchReg, 32); + __ srai_d(dst, dst, 32); + break; + } + case kMipsI64x2UConvertI32x4Low: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero, + i.InputSimd128Register(0)); + break; + } + case kMipsI64x2UConvertI32x4High: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero, + i.InputSimd128Register(0)); + break; + } + case kMipsI64x2ExtMulLowI32x4S: + ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_w, dotp_s_d); + break; + case kMipsI64x2ExtMulHighI32x4S: + ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_w, dotp_s_d); + break; + case kMipsI64x2ExtMulLowI32x4U: + ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_w, dotp_u_d); + break; + case kMipsI64x2ExtMulHighI32x4U: 
+ ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_w, dotp_u_d); + break; + case kMipsI32x4ExtMulLowI16x8S: + ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_h, dotp_s_w); + break; + case kMipsI32x4ExtMulHighI16x8S: + ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_h, dotp_s_w); + break; + case kMipsI32x4ExtMulLowI16x8U: + ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_h, dotp_u_w); + break; + case kMipsI32x4ExtMulHighI16x8U: + ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_h, dotp_u_w); + break; + case kMipsI16x8ExtMulLowI8x16S: + ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_b, dotp_s_h); + break; + case kMipsI16x8ExtMulHighI8x16S: + ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_b, dotp_s_h); + break; + case kMipsI16x8ExtMulLowI8x16U: + ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_b, dotp_u_h); + break; + case kMipsI16x8ExtMulHighI8x16U: + ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_b, dotp_u_h); + break; + case kMipsF32x4Splat: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ FmoveLow(kScratchReg, i.InputSingleRegister(0)); + __ fill_w(i.OutputSimd128Register(), kScratchReg); + break; + } + case kMipsF32x4ExtractLane: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1)); + __ FmoveLow(i.OutputSingleRegister(), kScratchReg); + break; + } + case kMipsF32x4ReplaceLane: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register src = i.InputSimd128Register(0); + Simd128Register dst = i.OutputSimd128Register(); + if (src != dst) { + __ move_v(dst, src); + } + __ FmoveLow(kScratchReg, i.InputSingleRegister(2)); + __ insert_w(dst, i.InputInt8(1), kScratchReg); + break; + } + case kMipsF32x4SConvertI32x4: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kMipsF32x4UConvertI32x4: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kMipsF32x4DemoteF64x2Zero: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero, + i.InputSimd128Register(0)); + break; + } + case kMipsI32x4Mul: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI32x4MaxS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI32x4MinS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI32x4Eq: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI32x4Ne: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); + __ nor_v(dst, dst, dst); + break; + } + case kMipsI32x4Shl: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1)); + break; + } + case kMipsI32x4ShrS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1)); + break; + } + case kMipsI32x4ShrU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ 
srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1)); + break; + } + case kMipsI32x4MaxU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI32x4MinU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsS128Select: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0)); + __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2), + i.InputSimd128Register(1)); + break; + } + case kMipsS128AndNot: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + __ nor_v(dst, i.InputSimd128Register(1), i.InputSimd128Register(1)); + __ and_v(dst, dst, i.InputSimd128Register(0)); + break; + } + case kMipsF32x4Abs: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); + break; + } + case kMipsF32x4Neg: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); + break; + } + case kMipsF32x4Sqrt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kMipsF32x4RecipApprox: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kMipsF32x4RecipSqrtApprox: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kMipsF32x4Add: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF32x4Sub: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF32x4Mul: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF32x4Div: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF32x4Max: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF32x4Min: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF32x4Eq: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF32x4Ne: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF32x4Lt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsF32x4Le: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + 
break; + } + case kMipsF32x4Pmin: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register lhs = i.InputSimd128Register(0); + Simd128Register rhs = i.InputSimd128Register(1); + // dst = rhs < lhs ? rhs : lhs + __ fclt_w(dst, rhs, lhs); + __ bsel_v(dst, lhs, rhs); + break; + } + case kMipsF32x4Pmax: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register lhs = i.InputSimd128Register(0); + Simd128Register rhs = i.InputSimd128Register(1); + // dst = lhs < rhs ? rhs : lhs + __ fclt_w(dst, lhs, rhs); + __ bsel_v(dst, lhs, rhs); + break; + } + case kMipsF32x4Ceil: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cfcmsa(kScratchReg, MSACSR); + __ li(kScratchReg2, kRoundToPlusInf); + __ ctcmsa(MSACSR, kScratchReg2); + __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ ctcmsa(MSACSR, kScratchReg); + break; + } + case kMipsF32x4Floor: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cfcmsa(kScratchReg, MSACSR); + __ li(kScratchReg2, kRoundToMinusInf); + __ ctcmsa(MSACSR, kScratchReg2); + __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ ctcmsa(MSACSR, kScratchReg); + break; + } + case kMipsF32x4Trunc: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cfcmsa(kScratchReg, MSACSR); + __ li(kScratchReg2, kRoundToZero); + __ ctcmsa(MSACSR, kScratchReg2); + __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ ctcmsa(MSACSR, kScratchReg); + break; + } + case kMipsF32x4NearestInt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cfcmsa(kScratchReg, MSACSR); + // kRoundToNearest == 0 + __ ctcmsa(MSACSR, zero_reg); + __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ ctcmsa(MSACSR, kScratchReg); + break; + } + case kMipsI32x4SConvertF32x4: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kMipsI32x4UConvertF32x4: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kMipsI32x4Neg: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ subv_w(i.OutputSimd128Register(), kSimd128RegZero, + i.InputSimd128Register(0)); + break; + } + case kMipsI32x4GtS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI32x4GeS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI32x4GtU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI32x4GeU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI32x4Abs: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + kSimd128RegZero); + break; + } + case kMipsI32x4BitMask: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Register dst = i.OutputRegister(); + Simd128Register src = i.InputSimd128Register(0); + Simd128Register scratch0 = kSimd128RegZero; + Simd128Register scratch1 = 
kSimd128ScratchReg; + __ srli_w(scratch0, src, 31); + __ srli_d(scratch1, scratch0, 31); + __ or_v(scratch0, scratch0, scratch1); + __ shf_w(scratch1, scratch0, 0x0E); + __ slli_d(scratch1, scratch1, 2); + __ or_v(scratch0, scratch0, scratch1); + __ copy_u_b(dst, scratch0, 0); + break; + } + case kMipsI32x4DotI16x8S: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI32x4TruncSatF64x2SZero: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0)); + __ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31); + __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero, + kSimd128ScratchReg); + break; + } + case kMipsI32x4TruncSatF64x2UZero: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0)); + __ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31); + __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero, + kSimd128ScratchReg); + break; + } + case kMipsI16x8Splat: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fill_h(i.OutputSimd128Register(), i.InputRegister(0)); + break; + } + case kMipsI16x8ExtractLaneU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ copy_u_h(i.OutputRegister(), i.InputSimd128Register(0), + i.InputInt8(1)); + break; + } + case kMipsI16x8ExtractLaneS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0), + i.InputInt8(1)); + break; + } + case kMipsI16x8ReplaceLane: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register src = i.InputSimd128Register(0); + Simd128Register dst = i.OutputSimd128Register(); + if (src != dst) { + __ move_v(dst, src); + } + __ insert_h(dst, i.InputInt8(1), i.InputRegister(2)); + break; + } + case kMipsI16x8Neg: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ subv_h(i.OutputSimd128Register(), kSimd128RegZero, + i.InputSimd128Register(0)); + break; + } + case kMipsI16x8Shl: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt4(1)); + break; + } + case kMipsI16x8ShrS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt4(1)); + break; + } + case kMipsI16x8ShrU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt4(1)); + break; + } + case kMipsI16x8Add: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8AddSatS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8Sub: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8SubSatS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8Mul: { + CpuFeatureScope msa_scope(tasm(), 
MIPS_SIMD); + __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8MaxS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8MinS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8Eq: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8Ne: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); + __ nor_v(dst, dst, dst); + break; + } + case kMipsI16x8GtS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI16x8GeS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI16x8AddSatU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8SubSatU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8MaxU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8MinU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI16x8GtU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI16x8GeU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI16x8RoundingAverageU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ aver_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI16x8Abs: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + kSimd128RegZero); + break; + } + case kMipsI16x8BitMask: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Register dst = i.OutputRegister(); + Simd128Register src = i.InputSimd128Register(0); + Simd128Register scratch0 = kSimd128RegZero; + Simd128Register scratch1 = kSimd128ScratchReg; + __ srli_h(scratch0, src, 15); + __ srli_w(scratch1, scratch0, 15); + __ or_v(scratch0, scratch0, scratch1); + __ srli_d(scratch1, scratch0, 30); + __ or_v(scratch0, scratch0, scratch1); + __ shf_w(scratch1, scratch0, 0x0E); + __ slli_d(scratch1, scratch1, 4); + __ or_v(scratch0, scratch0, scratch1); + __ copy_u_b(dst, scratch0, 0); + break; + } + case kMipsI16x8Q15MulRSatS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ mulr_q_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case 
kMipsI16x8ExtAddPairwiseI8x16S: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ hadd_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(0)); + break; + } + case kMipsI16x8ExtAddPairwiseI8x16U: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ hadd_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(0)); + break; + } + case kMipsI8x16Splat: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fill_b(i.OutputSimd128Register(), i.InputRegister(0)); + break; + } + case kMipsI8x16ExtractLaneU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ copy_u_b(i.OutputRegister(), i.InputSimd128Register(0), + i.InputInt8(1)); + break; + } + case kMipsI8x16ExtractLaneS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0), + i.InputInt8(1)); + break; + } + case kMipsI8x16ReplaceLane: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register src = i.InputSimd128Register(0); + Simd128Register dst = i.OutputSimd128Register(); + if (src != dst) { + __ move_v(dst, src); + } + __ insert_b(dst, i.InputInt8(1), i.InputRegister(2)); + break; + } + case kMipsI8x16Neg: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ subv_b(i.OutputSimd128Register(), kSimd128RegZero, + i.InputSimd128Register(0)); + break; + } + case kMipsI8x16Shl: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt3(1)); + break; + } + case kMipsI8x16ShrS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt3(1)); + break; + } + case kMipsI8x16Add: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI8x16AddSatS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI8x16Sub: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI8x16SubSatS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI8x16MaxS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI8x16MinS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI8x16Eq: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI8x16Ne: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); + __ nor_v(dst, dst, dst); + break; + } + case kMipsI8x16GtS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI8x16GeS: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ 
cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI8x16ShrU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt3(1)); + break; + } + case kMipsI8x16AddSatU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI8x16SubSatU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI8x16MaxU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI8x16MinU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsI8x16GtU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI8x16GeU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI8x16RoundingAverageU: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ aver_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kMipsI8x16Abs: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), + kSimd128RegZero); + break; + } + case kMipsI8x16Popcnt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kMipsI8x16BitMask: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Register dst = i.OutputRegister(); + Simd128Register src = i.InputSimd128Register(0); + Simd128Register scratch0 = kSimd128RegZero; + Simd128Register scratch1 = kSimd128ScratchReg; + __ srli_b(scratch0, src, 7); + __ srli_h(scratch1, scratch0, 7); + __ or_v(scratch0, scratch0, scratch1); + __ srli_w(scratch1, scratch0, 14); + __ or_v(scratch0, scratch0, scratch1); + __ srli_d(scratch1, scratch0, 28); + __ or_v(scratch0, scratch0, scratch1); + __ shf_w(scratch1, scratch0, 0x0E); + __ ilvev_b(scratch0, scratch1, scratch0); + __ copy_u_h(dst, scratch0, 0); + break; + } + case kMipsS128And: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsS128Or: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsS128Xor: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kMipsS128Not: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(0)); + break; + } + case kMipsV128AnyTrue: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Register dst = i.OutputRegister(); + Label all_false; + + __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero, + i.InputSimd128Register(0), USE_DELAY_SLOT); + __ li(dst, 0); // branch delay slot + __ li(dst, -1); 
+ __ bind(&all_false); + break; + } + case kMipsI64x2AllTrue: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Register dst = i.OutputRegister(); + Label all_true; + __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero, + i.InputSimd128Register(0), USE_DELAY_SLOT); + __ li(dst, -1); // branch delay slot + __ li(dst, 0); + __ bind(&all_true); + break; + } + case kMipsI32x4AllTrue: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Register dst = i.OutputRegister(); + Label all_true; + __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero, + i.InputSimd128Register(0), USE_DELAY_SLOT); + __ li(dst, -1); // branch delay slot + __ li(dst, 0); + __ bind(&all_true); + break; + } + case kMipsI16x8AllTrue: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Register dst = i.OutputRegister(); + Label all_true; + __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero, + i.InputSimd128Register(0), USE_DELAY_SLOT); + __ li(dst, -1); // branch delay slot + __ li(dst, 0); + __ bind(&all_true); + break; + } + case kMipsI8x16AllTrue: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Register dst = i.OutputRegister(); + Label all_true; + __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero, + i.InputSimd128Register(0), USE_DELAY_SLOT); + __ li(dst, -1); // branch delay slot + __ li(dst, 0); + __ bind(&all_true); + break; + } + case kMipsMsaLd: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ ld_b(i.OutputSimd128Register(), i.MemoryOperand()); + break; + } + case kMipsMsaSt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ st_b(i.InputSimd128Register(2), i.MemoryOperand()); + break; + } + case kMipsS32x4InterleaveRight: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] + // dst = [5, 1, 4, 0] + __ ilvr_w(dst, src1, src0); + break; + } + case kMipsS32x4InterleaveLeft: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] + // dst = [7, 3, 6, 2] + __ ilvl_w(dst, src1, src0); + break; + } + case kMipsS32x4PackEven: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] + // dst = [6, 4, 2, 0] + __ pckev_w(dst, src1, src0); + break; + } + case kMipsS32x4PackOdd: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] + // dst = [7, 5, 3, 1] + __ pckod_w(dst, src1, src0); + break; + } + case kMipsS32x4InterleaveEven: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] + // dst = [6, 2, 4, 0] + __ ilvev_w(dst, src1, src0); + break; + } + case kMipsS32x4InterleaveOdd: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] + // dst = [7, 3, 5, 1] + __ ilvod_w(dst, src1, src0); + break; + } + case kMipsS32x4Shuffle: { + CpuFeatureScope 
msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + + int32_t shuffle = i.InputInt32(2); + + if (src0 == src1) { + // Unary S32x4 shuffles are handled with shf.w instruction + unsigned lane = shuffle & 0xFF; + if (FLAG_debug_code) { + // range of all four lanes, for unary instruction, + // should belong to the same range, which can be one of these: + // [0, 3] or [4, 7] + if (lane >= 4) { + int32_t shuffle_helper = shuffle; + for (int i = 0; i < 4; ++i) { + lane = shuffle_helper & 0xFF; + CHECK_GE(lane, 4); + shuffle_helper >>= 8; + } + } + } + uint32_t i8 = 0; + for (int i = 0; i < 4; i++) { + lane = shuffle & 0xFF; + if (lane >= 4) { + lane -= 4; + } + DCHECK_GT(4, lane); + i8 |= lane << (2 * i); + shuffle >>= 8; + } + __ shf_w(dst, src0, i8); + } else { + // For binary shuffles use vshf.w instruction + if (dst == src0) { + __ move_v(kSimd128ScratchReg, src0); + src0 = kSimd128ScratchReg; + } else if (dst == src1) { + __ move_v(kSimd128ScratchReg, src1); + src1 = kSimd128ScratchReg; + } + + __ li(kScratchReg, i.InputInt32(2)); + __ insert_w(dst, 0, kScratchReg); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ ilvr_b(dst, kSimd128RegZero, dst); + __ ilvr_h(dst, kSimd128RegZero, dst); + __ vshf_w(dst, src1, src0); + } + break; + } + case kMipsS16x8InterleaveRight: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] + // dst = [11, 3, 10, 2, 9, 1, 8, 0] + __ ilvr_h(dst, src1, src0); + break; + } + case kMipsS16x8InterleaveLeft: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] + // dst = [15, 7, 14, 6, 13, 5, 12, 4] + __ ilvl_h(dst, src1, src0); + break; + } + case kMipsS16x8PackEven: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] + // dst = [14, 12, 10, 8, 6, 4, 2, 0] + __ pckev_h(dst, src1, src0); + break; + } + case kMipsS16x8PackOdd: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] + // dst = [15, 13, 11, 9, 7, 5, 3, 1] + __ pckod_h(dst, src1, src0); + break; + } + case kMipsS16x8InterleaveEven: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] + // dst = [14, 6, 12, 4, 10, 2, 8, 0] + __ ilvev_h(dst, src1, src0); + break; + } + case kMipsS16x8InterleaveOdd: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] + // dst = [15, 7, ... 
11, 3, 9, 1] + __ ilvod_h(dst, src1, src0); + break; + } + case kMipsS16x4Reverse: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3] + // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B + __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B); + break; + } + case kMipsS16x2Reverse: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1] + // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1 + __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1); + break; + } + case kMipsS8x16InterleaveRight: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] + // dst = [23, 7, ... 17, 1, 16, 0] + __ ilvr_b(dst, src1, src0); + break; + } + case kMipsS8x16InterleaveLeft: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] + // dst = [31, 15, ... 25, 9, 24, 8] + __ ilvl_b(dst, src1, src0); + break; + } + case kMipsS8x16PackEven: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] + // dst = [30, 28, ... 6, 4, 2, 0] + __ pckev_b(dst, src1, src0); + break; + } + case kMipsS8x16PackOdd: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] + // dst = [31, 29, ... 7, 5, 3, 1] + __ pckod_b(dst, src1, src0); + break; + } + case kMipsS8x16InterleaveEven: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] + // dst = [30, 14, ... 18, 2, 16, 0] + __ ilvev_b(dst, src1, src0); + break; + } + case kMipsS8x16InterleaveOdd: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] + // dst = [31, 15, ... 
19, 3, 17, 1] + __ ilvod_b(dst, src1, src0); + break; + } + case kMipsS8x16Concat: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + DCHECK(dst == i.InputSimd128Register(0)); + __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2)); + break; + } + case kMipsI8x16Shuffle: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + + if (dst == src0) { + __ move_v(kSimd128ScratchReg, src0); + src0 = kSimd128ScratchReg; + } else if (dst == src1) { + __ move_v(kSimd128ScratchReg, src1); + src1 = kSimd128ScratchReg; + } + + __ li(kScratchReg, i.InputInt32(2)); + __ insert_w(dst, 0, kScratchReg); + __ li(kScratchReg, i.InputInt32(3)); + __ insert_w(dst, 1, kScratchReg); + __ li(kScratchReg, i.InputInt32(4)); + __ insert_w(dst, 2, kScratchReg); + __ li(kScratchReg, i.InputInt32(5)); + __ insert_w(dst, 3, kScratchReg); + __ vshf_b(dst, src1, src0); + break; + } + case kMipsI8x16Swizzle: { + Simd128Register dst = i.OutputSimd128Register(), + tbl = i.InputSimd128Register(0), + ctl = i.InputSimd128Register(1); + DCHECK(dst != ctl && dst != tbl); + Simd128Register zeroReg = i.TempSimd128Register(0); + __ fill_w(zeroReg, zero_reg); + __ move_v(dst, ctl); + __ vshf_b(dst, tbl, zeroReg); + break; + } + case kMipsS8x8Reverse: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7] + // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1 + // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B + __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1); + __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B); + break; + } + case kMipsS8x4Reverse: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3] + // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B + __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B); + break; + } + case kMipsS8x2Reverse: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 
2, 3, 0, 1] + // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1 + __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1); + break; + } + case kMipsI32x4SConvertI16x8Low: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register src = i.InputSimd128Register(0); + __ ilvr_h(kSimd128ScratchReg, src, src); + __ slli_w(dst, kSimd128ScratchReg, 16); + __ srai_w(dst, dst, 16); + break; + } + case kMipsI32x4SConvertI16x8High: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register src = i.InputSimd128Register(0); + __ ilvl_h(kSimd128ScratchReg, src, src); + __ slli_w(dst, kSimd128ScratchReg, 16); + __ srai_w(dst, dst, 16); + break; + } + case kMipsI32x4UConvertI16x8Low: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero, + i.InputSimd128Register(0)); + break; + } + case kMipsI32x4UConvertI16x8High: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero, + i.InputSimd128Register(0)); + break; + } + case kMipsI16x8SConvertI8x16Low: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register src = i.InputSimd128Register(0); + __ ilvr_b(kSimd128ScratchReg, src, src); + __ slli_h(dst, kSimd128ScratchReg, 8); + __ srai_h(dst, dst, 8); + break; + } + case kMipsI16x8SConvertI8x16High: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register src = i.InputSimd128Register(0); + __ ilvl_b(kSimd128ScratchReg, src, src); + __ slli_h(dst, kSimd128ScratchReg, 8); + __ srai_h(dst, dst, 8); + break; + } + case kMipsI16x8SConvertI32x4: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register src0 = i.InputSimd128Register(0); + Simd128Register src1 = i.InputSimd128Register(1); + __ sat_s_w(kSimd128ScratchReg, src0, 15); + __ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch + __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg); + break; + } + case kMipsI16x8UConvertI32x4: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register src0 = i.InputSimd128Register(0); + Simd128Register src1 = i.InputSimd128Register(1); + __ sat_u_w(kSimd128ScratchReg, src0, 15); + __ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch + __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg); + break; + } + case kMipsI16x8UConvertI8x16Low: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero, + i.InputSimd128Register(0)); + break; + } + case kMipsI16x8UConvertI8x16High: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); + __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero, + i.InputSimd128Register(0)); + break; + } + case kMipsI8x16SConvertI16x8: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register src0 = i.InputSimd128Register(0); + Simd128Register src1 = i.InputSimd128Register(1); + __ sat_s_h(kSimd128ScratchReg, src0, 7); + __ sat_s_h(kSimd128RegZero, src1, 7); // 
kSimd128RegZero as scratch + __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg); + break; + } + case kMipsI8x16UConvertI16x8: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + Simd128Register dst = i.OutputSimd128Register(); + Simd128Register src0 = i.InputSimd128Register(0); + Simd128Register src1 = i.InputSimd128Register(1); + __ sat_u_h(kSimd128ScratchReg, src0, 7); + __ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch + __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg); + break; + } + } + return kSuccess; +} + +void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, + Instruction* instr, FlagsCondition condition, + Label* tlabel, Label* flabel, bool fallthru) { +#undef __ +#define __ tasm-> + + Condition cc = kNoCondition; + // MIPS does not have condition code flags, so compare and branch are + // implemented differently than on the other arch's. The compare operations + // emit mips pseudo-instructions, which are handled here by branch + // instructions that do the actual comparison. Essential that the input + // registers to compare pseudo-op are not modified before this branch op, as + // they are tested here. + + MipsOperandConverter i(gen, instr); + if (instr->arch_opcode() == kMipsTst) { + cc = FlagsConditionToConditionTst(condition); + __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg)); + } else if (instr->arch_opcode() == kMipsAddOvf || + instr->arch_opcode() == kMipsSubOvf) { + // Overflow occurs if overflow register is negative + switch (condition) { + case kOverflow: + __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg)); + break; + case kNotOverflow: + __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg)); + break; + default: + UNSUPPORTED_COND(instr->arch_opcode(), condition); + } + } else if (instr->arch_opcode() == kMipsMulOvf) { + // Overflow occurs if overflow register is not zero + switch (condition) { + case kOverflow: + __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg)); + break; + case kNotOverflow: + __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg)); + break; + default: + UNSUPPORTED_COND(kMipsMulOvf, condition); + } + } else if (instr->arch_opcode() == kMipsCmp) { + cc = FlagsConditionToConditionCmp(condition); + __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); + } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) { + cc = FlagsConditionToConditionCmp(condition); + DCHECK((cc == ls) || (cc == hi)); + if (cc == ls) { + __ xori(i.TempRegister(0), i.TempRegister(0), 1); + } + __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg)); + } else if (instr->arch_opcode() == kMipsCmpS || + instr->arch_opcode() == kMipsCmpD) { + bool predicate; + FlagsConditionToConditionCmpFPU(&predicate, condition); + if (predicate) { + __ BranchTrueF(tlabel); + } else { + __ BranchFalseF(tlabel); + } + } else { + PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n", + instr->arch_opcode()); + UNIMPLEMENTED(); + } + if (!fallthru) __ Branch(flabel); // no fallthru to flabel. +#undef __ +#define __ tasm()-> +} + +// Assembles branches after an instruction. 
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { + Label* tlabel = branch->true_label; + Label* flabel = branch->false_label; + AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, + branch->fallthru); +} + +void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, + BranchInfo* branch) { + AssembleArchBranch(instr, branch); +} + +void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder( + RpoNumber target) { + __ Branch(GetLabel(target)); +} + +#if V8_ENABLE_WEBASSEMBLY +void CodeGenerator::AssembleArchTrap(Instruction* instr, + FlagsCondition condition) { + class OutOfLineTrap final : public OutOfLineCode { + public: + OutOfLineTrap(CodeGenerator* gen, Instruction* instr) + : OutOfLineCode(gen), instr_(instr), gen_(gen) {} + + void Generate() final { + MipsOperandConverter i(gen_, instr_); + TrapId trap_id = + static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1)); + GenerateCallToTrap(trap_id); + } + + private: + void GenerateCallToTrap(TrapId trap_id) { + if (trap_id == TrapId::kInvalid) { + // We cannot test calls to the runtime in cctest/test-run-wasm. + // Therefore we emit a call to C here instead of a call to the runtime. + // We use the context register as the scratch register, because we do + // not have a context here. + __ PrepareCallCFunction(0, 0, cp); + __ CallCFunction( + ExternalReference::wasm_call_trap_callback_for_testing(), 0); + __ LeaveFrame(StackFrame::WASM); + auto call_descriptor = gen_->linkage()->GetIncomingDescriptor(); + int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount()); + __ Drop(pop_count); + __ Ret(); + } else { + gen_->AssembleSourcePosition(instr_); + // A direct call to a wasm runtime stub defined in this module. + // Just encode the stub index. This will be patched when the code + // is added to the native module and copied into wasm code space. + __ Call(static_cast<Address>
(trap_id), RelocInfo::WASM_STUB_CALL); + ReferenceMap* reference_map = + gen_->zone()->New<ReferenceMap>(gen_->zone()); + gen_->RecordSafepoint(reference_map); + if (FLAG_debug_code) { + __ stop(); + } + } + } + + Instruction* instr_; + CodeGenerator* gen_; + }; + auto ool = zone()->New<OutOfLineTrap>(this, instr); + Label* tlabel = ool->entry(); + AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); +} +#endif // V8_ENABLE_WEBASSEMBLY + +// Assembles boolean materializations after an instruction. +void CodeGenerator::AssembleArchBoolean(Instruction* instr, + FlagsCondition condition) { + MipsOperandConverter i(this, instr); + + // Materialize a full 32-bit 1 or 0 value. The result register is always the + // last output of the instruction. + DCHECK_NE(0u, instr->OutputCount()); + Register result = i.OutputRegister(instr->OutputCount() - 1); + Condition cc = kNoCondition; + // MIPS does not have condition code flags, so compare and branch are + // implemented differently than on the other arch's. The compare operations + // emit mips pseudo-instructions, which are checked and handled here. + + if (instr->arch_opcode() == kMipsTst) { + cc = FlagsConditionToConditionTst(condition); + if (cc == eq) { + __ Sltu(result, kScratchReg, 1); + } else { + __ Sltu(result, zero_reg, kScratchReg); + } + return; + } else if (instr->arch_opcode() == kMipsAddOvf || + instr->arch_opcode() == kMipsSubOvf) { + // Overflow occurs if overflow register is negative + __ slt(result, kScratchReg, zero_reg); + } else if (instr->arch_opcode() == kMipsMulOvf) { + // Overflow occurs if overflow register is not zero + __ Sgtu(result, kScratchReg, zero_reg); + } else if (instr->arch_opcode() == kMipsCmp) { + cc = FlagsConditionToConditionCmp(condition); + switch (cc) { + case eq: + case ne: { + Register left = i.InputRegister(0); + Operand right = i.InputOperand(1); + if (instr->InputAt(1)->IsImmediate()) { + if (is_int16(-right.immediate())) { + if (right.immediate() == 0) { + if (cc == eq) { + __ Sltu(result, left, 1); + } else { + __ Sltu(result, zero_reg, left); + } + } else { + __ Addu(result, left, -right.immediate()); + if (cc == eq) { + __ Sltu(result, result, 1); + } else { + __ Sltu(result, zero_reg, result); + } + } + } else { + if (is_uint16(right.immediate())) { + __ Xor(result, left, right); + } else { + __ li(kScratchReg, right); + __ Xor(result, left, kScratchReg); + } + if (cc == eq) { + __ Sltu(result, result, 1); + } else { + __ Sltu(result, zero_reg, result); + } + } + } else { + __ Xor(result, left, right); + if (cc == eq) { + __ Sltu(result, result, 1); + } else { + __ Sltu(result, zero_reg, result); + } + } + } break; + case lt: + case ge: { + Register left = i.InputRegister(0); + Operand right = i.InputOperand(1); + __ Slt(result, left, right); + if (cc == ge) { + __ xori(result, result, 1); + } + } break; + case gt: + case le: { + Register left = i.InputRegister(1); + Operand right = i.InputOperand(0); + __ Slt(result, left, right); + if (cc == le) { + __ xori(result, result, 1); + } + } break; + case lo: + case hs: { + Register left = i.InputRegister(0); + Operand right = i.InputOperand(1); + __ Sltu(result, left, right); + if (cc == hs) { + __ xori(result, result, 1); + } + } break; + case hi: + case ls: { + Register left = i.InputRegister(1); + Operand right = i.InputOperand(0); + __ Sltu(result, left, right); + if (cc == ls) { + __ xori(result, result, 1); + } + } break; + default: + UNREACHABLE(); + } + return; + } else if (instr->arch_opcode() == kMipsCmpD || + instr->arch_opcode() == 
kMipsCmpS) { + FPURegister left = i.InputOrZeroDoubleRegister(0); + FPURegister right = i.InputOrZeroDoubleRegister(1); + if ((left == kDoubleRegZero || right == kDoubleRegZero) && + !__ IsDoubleZeroRegSet()) { + __ Move(kDoubleRegZero, 0.0); + } + bool predicate; + FlagsConditionToConditionCmpFPU(&predicate, condition); + if (!IsMipsArchVariant(kMips32r6)) { + __ li(result, Operand(1)); + if (predicate) { + __ Movf(result, zero_reg); + } else { + __ Movt(result, zero_reg); + } + } else { + __ mfc1(result, kDoubleCompareReg); + if (predicate) { + __ And(result, result, 1); // cmp returns all 1's/0's, use only LSB. + } else { + __ Addu(result, result, 1); // Toggle result for not equal. + } + } + return; + } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) { + cc = FlagsConditionToConditionCmp(condition); + DCHECK((cc == ls) || (cc == hi)); + if (cc == ls) { + __ xori(i.OutputRegister(), i.TempRegister(0), 1); + } + return; + } else { + PrintF("AssembleArchBoolean Unimplemented arch_opcode is : %d\n", + instr->arch_opcode()); + TRACE_UNIMPL(); + UNIMPLEMENTED(); + } +} + +void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) { + MipsOperandConverter i(this, instr); + Register input = i.InputRegister(0); + std::vector<std::pair<int32_t, Label*>> cases; + for (size_t index = 2; index < instr->InputCount(); index += 2) { + cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))}); + } + AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(), + cases.data() + cases.size()); +} + +void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { + MipsOperandConverter i(this, instr); + Register input = i.InputRegister(0); + size_t const case_count = instr->InputCount() - 2; + __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count)); + __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) { + return GetLabel(i.InputRpo(index + 2)); + }); +} + +void CodeGenerator::AssembleArchSelect(Instruction* instr, + FlagsCondition condition) { + UNIMPLEMENTED(); +} + +void CodeGenerator::FinishFrame(Frame* frame) { + auto call_descriptor = linkage()->GetIncomingDescriptor(); + + const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); + if (!saves_fpu.is_empty()) { + frame->AlignSavedCalleeRegisterSlots(); + } + + if (!saves_fpu.is_empty()) { + int count = saves_fpu.Count(); + DCHECK_EQ(kNumCalleeSavedFPU, count); + frame->AllocateSavedCalleeRegisterSlots(count * + (kDoubleSize / kSystemPointerSize)); + } + + const RegList saves = call_descriptor->CalleeSavedRegisters(); + if (!saves.is_empty()) { + int count = saves.Count(); + frame->AllocateSavedCalleeRegisterSlots(count); + } +} + +void CodeGenerator::AssembleConstructFrame() { + auto call_descriptor = linkage()->GetIncomingDescriptor(); + if (frame_access_state()->has_frame()) { + if (call_descriptor->IsCFunctionCall()) { +#if V8_ENABLE_WEBASSEMBLY + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ StubPrologue(StackFrame::C_WASM_ENTRY); + // Reserve stack space for saving the c_entry_fp later. + __ Subu(sp, sp, Operand(kSystemPointerSize)); +#else + // For balance. 
+ if (false) { +#endif // V8_ENABLE_WEBASSEMBLY + } else { + __ Push(ra, fp); + __ mov(fp, sp); + } + } else if (call_descriptor->IsJSFunctionCall()) { + __ Prologue(); + } else { + __ StubPrologue(info()->GetOutputStackFrameType()); +#if V8_ENABLE_WEBASSEMBLY + if (call_descriptor->IsWasmFunctionCall() || + call_descriptor->IsWasmImportWrapper() || + call_descriptor->IsWasmCapiFunction()) { + __ Push(kWasmInstanceRegister); + } + if (call_descriptor->IsWasmCapiFunction()) { + // Reserve space for saving the PC later. + __ Subu(sp, sp, Operand(kSystemPointerSize)); + } +#endif // V8_ENABLE_WEBASSEMBLY + } + } + + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); + + if (info()->is_osr()) { + // TurboFan OSR-compiled functions cannot be entered directly. + __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction); + + // Unoptimized code jumps directly to this entrypoint while the unoptimized + // frame is still on the stack. Optimized code uses OSR values directly from + // the unoptimized frame. Thus, all that needs to be done is to allocate the + // remaining stack slots. + __ RecordComment("-- OSR entrypoint --"); + osr_pc_offset_ = __ pc_offset(); + required_slots -= osr_helper()->UnoptimizedFrameSlots(); + } + + const RegList saves = call_descriptor->CalleeSavedRegisters(); + const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); + + if (required_slots > 0) { + DCHECK(frame_access_state()->has_frame()); +#if V8_ENABLE_WEBASSEMBLY + if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) { + // For WebAssembly functions with big frames we have to do the stack + // overflow check before we construct the frame. Otherwise we may not + // have enough space on the stack to call the runtime for the stack + // overflow. + Label done; + + // If the frame is bigger than the stack, we throw the stack overflow + // exception unconditionally. Thereby we can avoid the integer overflow + // check in the condition code. + if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) { + __ Lw( + kScratchReg, + FieldMemOperand(kWasmInstanceRegister, + WasmInstanceObject::kRealStackLimitAddressOffset)); + __ Lw(kScratchReg, MemOperand(kScratchReg)); + __ Addu(kScratchReg, kScratchReg, + Operand(required_slots * kSystemPointerSize)); + __ Branch(&done, uge, sp, Operand(kScratchReg)); + } + + __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); + // The call does not return, hence we can ignore any references and just + // define an empty safepoint. + ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone()); + RecordSafepoint(reference_map); + if (FLAG_debug_code) __ stop(); + + __ bind(&done); + } +#endif // V8_ENABLE_WEBASSEMBLY + } + + const int returns = frame()->GetReturnSlotCount(); + + // Skip callee-saved and return slots, which are pushed below. + required_slots -= saves.Count(); + required_slots -= 2 * saves_fpu.Count(); + required_slots -= returns; + if (required_slots > 0) { + __ Subu(sp, sp, Operand(required_slots * kSystemPointerSize)); + } + + // Save callee-saved FPU registers. + if (!saves_fpu.is_empty()) { + __ MultiPushFPU(saves_fpu); + } + + if (!saves.is_empty()) { + // Save callee-saved registers. + __ MultiPush(saves); + } + + if (returns != 0) { + // Create space for returns. 
+ __ Subu(sp, sp, Operand(returns * kSystemPointerSize)); + } +} + +void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { + auto call_descriptor = linkage()->GetIncomingDescriptor(); + + const int returns = frame()->GetReturnSlotCount(); + if (returns != 0) { + __ Addu(sp, sp, Operand(returns * kSystemPointerSize)); + } + + // Restore GP registers. + const RegList saves = call_descriptor->CalleeSavedRegisters(); + if (!saves.is_empty()) { + __ MultiPop(saves); + } + + // Restore FPU registers. + const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); + if (!saves_fpu.is_empty()) { + __ MultiPopFPU(saves_fpu); + } + + MipsOperandConverter g(this, nullptr); + const int parameter_slots = + static_cast<int>(call_descriptor->ParameterSlotCount()); + + // {additional_pop_count} is only greater than zero if {parameter_slots = 0}. + // Check RawMachineAssembler::PopAndReturn. + if (parameter_slots != 0) { + if (additional_pop_count->IsImmediate()) { + DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0); + } else if (FLAG_debug_code) { + __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue, + g.ToRegister(additional_pop_count), + Operand(static_cast<int64_t>(0))); + } + } + // Functions with JS linkage have at least one parameter (the receiver). + // If {parameter_slots} == 0, it means it is a builtin with + // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping + // itself. + const bool drop_jsargs = frame_access_state()->has_frame() && + call_descriptor->IsJSFunctionCall() && + parameter_slots != 0; + + if (call_descriptor->IsCFunctionCall()) { + AssembleDeconstructFrame(); + } else if (frame_access_state()->has_frame()) { + // Canonicalize JSFunction return sites for now unless they have a variable + // number of stack slot pops. + if (additional_pop_count->IsImmediate() && + g.ToConstant(additional_pop_count).ToInt32() == 0) { + if (return_label_.is_bound()) { + __ Branch(&return_label_); + return; + } else { + __ bind(&return_label_); + } + } + if (drop_jsargs) { + // Get the actual argument count + __ Lw(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset)); + } + AssembleDeconstructFrame(); + } + + if (drop_jsargs) { + // We must pop all arguments from the stack (including the receiver). This + // number of arguments is given by max(1 + argc_reg, parameter_slots). + if (parameter_slots > 1) { + __ li(kScratchReg, parameter_slots); + __ slt(kScratchReg2, t0, kScratchReg); + __ movn(t0, kScratchReg, kScratchReg2); + } + __ Lsa(sp, sp, t0, kSystemPointerSizeLog2, t0); + } else if (additional_pop_count->IsImmediate()) { + DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type()); + int additional_count = g.ToConstant(additional_pop_count).ToInt32(); + __ Drop(parameter_slots + additional_count); + } else { + Register pop_reg = g.ToRegister(additional_pop_count); + __ Drop(parameter_slots); + __ Lsa(sp, sp, pop_reg, kSystemPointerSizeLog2, pop_reg); + } + __ Ret(); +} + +void CodeGenerator::FinishCode() {} + +void CodeGenerator::PrepareForDeoptimizationExits( + ZoneDeque<DeoptimizationExit*>* exits) {} + +void CodeGenerator::AssembleMove(InstructionOperand* source, + InstructionOperand* destination) { + MipsOperandConverter g(this, nullptr); + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. 
+ if (source->IsRegister()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + Register src = g.ToRegister(source); + if (destination->IsRegister()) { + __ mov(g.ToRegister(destination), src); + } else { + __ sw(src, g.ToMemOperand(destination)); + } + } else if (source->IsStackSlot()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + MemOperand src = g.ToMemOperand(source); + if (destination->IsRegister()) { + __ lw(g.ToRegister(destination), src); + } else { + Register temp = kScratchReg; + __ lw(temp, src); + __ sw(temp, g.ToMemOperand(destination)); + } + } else if (source->IsConstant()) { + Constant src = g.ToConstant(source); + if (destination->IsRegister() || destination->IsStackSlot()) { + Register dst = + destination->IsRegister() ? g.ToRegister(destination) : kScratchReg; + switch (src.type()) { + case Constant::kInt32: +#if V8_ENABLE_WEBASSEMBLY + if (RelocInfo::IsWasmReference(src.rmode())) + __ li(dst, Operand(src.ToInt32(), src.rmode())); + else +#endif // V8_ENABLE_WEBASSEMBLY + __ li(dst, Operand(src.ToInt32())); + break; + case Constant::kFloat32: + __ li(dst, Operand::EmbeddedNumber(src.ToFloat32())); + break; + case Constant::kInt64: + UNREACHABLE(); + case Constant::kFloat64: + __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value())); + break; + case Constant::kExternalReference: + __ li(dst, src.ToExternalReference()); + break; + case Constant::kDelayedStringConstant: + __ li(dst, src.ToDelayedStringConstant()); + break; + case Constant::kHeapObject: { + Handle<HeapObject> src_object = src.ToHeapObject(); + RootIndex index; + if (IsMaterializableFromRoot(src_object, &index)) { + __ LoadRoot(dst, index); + } else { + __ li(dst, src_object); + } + break; + } + case Constant::kCompressedHeapObject: + UNREACHABLE(); + case Constant::kRpoNumber: + UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips. + } + if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination)); + } else if (src.type() == Constant::kFloat32) { + if (destination->IsFPStackSlot()) { + MemOperand dst = g.ToMemOperand(destination); + if (bit_cast<int32_t>(src.ToFloat32()) == 0) { + __ sw(zero_reg, dst); + } else { + __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32()))); + __ sw(kScratchReg, dst); + } + } else { + DCHECK(destination->IsFPRegister()); + FloatRegister dst = g.ToSingleRegister(destination); + __ Move(dst, src.ToFloat32()); + } + } else { + DCHECK_EQ(Constant::kFloat64, src.type()); + DoubleRegister dst = destination->IsFPRegister() + ? 
g.ToDoubleRegister(destination) + : kScratchDoubleReg; + __ Move(dst, src.ToFloat64().value()); + if (destination->IsFPStackSlot()) { + __ Sdc1(dst, g.ToMemOperand(destination)); + } + } + } else if (source->IsFPRegister()) { + MachineRepresentation rep = LocationOperand::cast(source)->representation(); + if (rep == MachineRepresentation::kSimd128) { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + MSARegister src = g.ToSimd128Register(source); + if (destination->IsSimd128Register()) { + MSARegister dst = g.ToSimd128Register(destination); + __ move_v(dst, src); + } else { + DCHECK(destination->IsSimd128StackSlot()); + __ st_b(src, g.ToMemOperand(destination)); + } + } else { + FPURegister src = g.ToDoubleRegister(source); + if (destination->IsFPRegister()) { + FPURegister dst = g.ToDoubleRegister(destination); + __ Move(dst, src); + } else { + DCHECK(destination->IsFPStackSlot()); + MachineRepresentation rep = + LocationOperand::cast(source)->representation(); + if (rep == MachineRepresentation::kFloat64) { + __ Sdc1(src, g.ToMemOperand(destination)); + } else if (rep == MachineRepresentation::kFloat32) { + __ swc1(src, g.ToMemOperand(destination)); + } else { + UNREACHABLE(); + } + } + } + } else if (source->IsFPStackSlot()) { + DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot()); + MemOperand src = g.ToMemOperand(source); + MachineRepresentation rep = LocationOperand::cast(source)->representation(); + if (destination->IsFPRegister()) { + if (rep == MachineRepresentation::kFloat64) { + __ Ldc1(g.ToDoubleRegister(destination), src); + } else if (rep == MachineRepresentation::kFloat32) { + __ lwc1(g.ToDoubleRegister(destination), src); + } else { + DCHECK_EQ(MachineRepresentation::kSimd128, rep); + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ ld_b(g.ToSimd128Register(destination), src); + } + } else { + FPURegister temp = kScratchDoubleReg; + if (rep == MachineRepresentation::kFloat64) { + __ Ldc1(temp, src); + __ Sdc1(temp, g.ToMemOperand(destination)); + } else if (rep == MachineRepresentation::kFloat32) { + __ lwc1(temp, src); + __ swc1(temp, g.ToMemOperand(destination)); + } else { + DCHECK_EQ(MachineRepresentation::kSimd128, rep); + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + MSARegister temp = kSimd128ScratchReg; + __ ld_b(temp, src); + __ st_b(temp, g.ToMemOperand(destination)); + } + } + } else { + UNREACHABLE(); + } +} + +void CodeGenerator::AssembleSwap(InstructionOperand* source, + InstructionOperand* destination) { + MipsOperandConverter g(this, nullptr); + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + if (source->IsRegister()) { + // Register-register. 
+ Register temp = kScratchReg; + Register src = g.ToRegister(source); + if (destination->IsRegister()) { + Register dst = g.ToRegister(destination); + __ Move(temp, src); + __ Move(src, dst); + __ Move(dst, temp); + } else { + DCHECK(destination->IsStackSlot()); + MemOperand dst = g.ToMemOperand(destination); + __ mov(temp, src); + __ lw(src, dst); + __ sw(temp, dst); + } + } else if (source->IsStackSlot()) { + DCHECK(destination->IsStackSlot()); + Register temp_0 = kScratchReg; + Register temp_1 = kScratchReg2; + MemOperand src = g.ToMemOperand(source); + MemOperand dst = g.ToMemOperand(destination); + __ lw(temp_0, src); + __ lw(temp_1, dst); + __ sw(temp_0, dst); + __ sw(temp_1, src); + } else if (source->IsFPRegister()) { + if (destination->IsFPRegister()) { + MachineRepresentation rep = + LocationOperand::cast(source)->representation(); + if (rep == MachineRepresentation::kSimd128) { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + MSARegister temp = kSimd128ScratchReg; + MSARegister src = g.ToSimd128Register(source); + MSARegister dst = g.ToSimd128Register(destination); + __ move_v(temp, src); + __ move_v(src, dst); + __ move_v(dst, temp); + } else { + FPURegister temp = kScratchDoubleReg; + FPURegister src = g.ToDoubleRegister(source); + FPURegister dst = g.ToDoubleRegister(destination); + __ Move(temp, src); + __ Move(src, dst); + __ Move(dst, temp); + } + } else { + DCHECK(destination->IsFPStackSlot()); + MemOperand dst = g.ToMemOperand(destination); + MachineRepresentation rep = + LocationOperand::cast(source)->representation(); + if (rep == MachineRepresentation::kFloat64) { + FPURegister temp = kScratchDoubleReg; + FPURegister src = g.ToDoubleRegister(source); + __ Move(temp, src); + __ Ldc1(src, dst); + __ Sdc1(temp, dst); + } else if (rep == MachineRepresentation::kFloat32) { + FPURegister temp = kScratchDoubleReg; + FPURegister src = g.ToFloatRegister(source); + __ Move(temp, src); + __ lwc1(src, dst); + __ swc1(temp, dst); + } else { + DCHECK_EQ(MachineRepresentation::kSimd128, rep); + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + MSARegister temp = kSimd128ScratchReg; + MSARegister src = g.ToSimd128Register(source); + __ move_v(temp, src); + __ ld_b(src, dst); + __ st_b(temp, dst); + } + } + } else if (source->IsFPStackSlot()) { + DCHECK(destination->IsFPStackSlot()); + Register temp_0 = kScratchReg; + FPURegister temp_1 = kScratchDoubleReg; + MemOperand src0 = g.ToMemOperand(source); + MemOperand dst0 = g.ToMemOperand(destination); + MachineRepresentation rep = LocationOperand::cast(source)->representation(); + if (rep == MachineRepresentation::kFloat64) { + MemOperand src1(src0.rm(), src0.offset() + kIntSize); + MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize); + __ Ldc1(temp_1, dst0); // Save destination in temp_1. + __ lw(temp_0, src0); // Then use temp_0 to copy source to destination. + __ sw(temp_0, dst0); + __ lw(temp_0, src1); + __ sw(temp_0, dst1); + __ Sdc1(temp_1, src0); + } else if (rep == MachineRepresentation::kFloat32) { + __ lwc1(temp_1, dst0); // Save destination in temp_1. + __ lw(temp_0, src0); // Then use temp_0 to copy source to destination. 
+ __ sw(temp_0, dst0); + __ swc1(temp_1, src0); + } else { + DCHECK_EQ(MachineRepresentation::kSimd128, rep); + MemOperand src1(src0.rm(), src0.offset() + kIntSize); + MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize); + MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize); + MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize); + MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize); + MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize); + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + MSARegister temp_1 = kSimd128ScratchReg; + __ ld_b(temp_1, dst0); // Save destination in temp_1. + __ lw(temp_0, src0); // Then use temp_0 to copy source to destination. + __ sw(temp_0, dst0); + __ lw(temp_0, src1); + __ sw(temp_0, dst1); + __ lw(temp_0, src2); + __ sw(temp_0, dst2); + __ lw(temp_0, src3); + __ sw(temp_0, dst3); + __ st_b(temp_1, src0); + } + } else { + // No other combinations are possible. + UNREACHABLE(); + } +} + +void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { + // On 32-bit MIPS we emit the jump tables inline. + UNREACHABLE(); +} + +#undef __ +#undef ASSEMBLE_F64X2_ARITHMETIC_BINOP +#undef ASSEMBLE_SIMD_EXTENDED_MULTIPLY + +} // namespace compiler +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h new file mode 100644 index 00000000000000..a0e00d0e4564b9 --- /dev/null +++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h @@ -0,0 +1,402 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_ +#define V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_ + +namespace v8 { +namespace internal { +namespace compiler { + +// MIPS-specific opcodes that specify which assembly sequence to emit. +// Most opcodes specify a single instruction. 
+ +#define TARGET_ARCH_OPCODE_LIST(V) \ + V(MipsAdd) \ + V(MipsAddOvf) \ + V(MipsSub) \ + V(MipsSubOvf) \ + V(MipsMul) \ + V(MipsMulOvf) \ + V(MipsMulHigh) \ + V(MipsMulHighU) \ + V(MipsDiv) \ + V(MipsDivU) \ + V(MipsMod) \ + V(MipsModU) \ + V(MipsAnd) \ + V(MipsOr) \ + V(MipsNor) \ + V(MipsXor) \ + V(MipsClz) \ + V(MipsCtz) \ + V(MipsPopcnt) \ + V(MipsLsa) \ + V(MipsShl) \ + V(MipsShr) \ + V(MipsSar) \ + V(MipsShlPair) \ + V(MipsShrPair) \ + V(MipsSarPair) \ + V(MipsExt) \ + V(MipsIns) \ + V(MipsRor) \ + V(MipsMov) \ + V(MipsTst) \ + V(MipsCmp) \ + V(MipsCmpS) \ + V(MipsAddS) \ + V(MipsSubS) \ + V(MipsMulS) \ + V(MipsDivS) \ + V(MipsAbsS) \ + V(MipsSqrtS) \ + V(MipsMaxS) \ + V(MipsMinS) \ + V(MipsCmpD) \ + V(MipsAddD) \ + V(MipsSubD) \ + V(MipsMulD) \ + V(MipsDivD) \ + V(MipsModD) \ + V(MipsAbsD) \ + V(MipsSqrtD) \ + V(MipsMaxD) \ + V(MipsMinD) \ + V(MipsNegS) \ + V(MipsNegD) \ + V(MipsAddPair) \ + V(MipsSubPair) \ + V(MipsMulPair) \ + V(MipsMaddS) \ + V(MipsMaddD) \ + V(MipsMsubS) \ + V(MipsMsubD) \ + V(MipsFloat32RoundDown) \ + V(MipsFloat32RoundTruncate) \ + V(MipsFloat32RoundUp) \ + V(MipsFloat32RoundTiesEven) \ + V(MipsFloat64RoundDown) \ + V(MipsFloat64RoundTruncate) \ + V(MipsFloat64RoundUp) \ + V(MipsFloat64RoundTiesEven) \ + V(MipsCvtSD) \ + V(MipsCvtDS) \ + V(MipsTruncWD) \ + V(MipsRoundWD) \ + V(MipsFloorWD) \ + V(MipsCeilWD) \ + V(MipsTruncWS) \ + V(MipsRoundWS) \ + V(MipsFloorWS) \ + V(MipsCeilWS) \ + V(MipsTruncUwD) \ + V(MipsTruncUwS) \ + V(MipsCvtDW) \ + V(MipsCvtDUw) \ + V(MipsCvtSW) \ + V(MipsCvtSUw) \ + V(MipsLb) \ + V(MipsLbu) \ + V(MipsSb) \ + V(MipsLh) \ + V(MipsUlh) \ + V(MipsLhu) \ + V(MipsUlhu) \ + V(MipsSh) \ + V(MipsUsh) \ + V(MipsLw) \ + V(MipsUlw) \ + V(MipsSw) \ + V(MipsUsw) \ + V(MipsLwc1) \ + V(MipsUlwc1) \ + V(MipsSwc1) \ + V(MipsUswc1) \ + V(MipsLdc1) \ + V(MipsUldc1) \ + V(MipsSdc1) \ + V(MipsUsdc1) \ + V(MipsFloat64ExtractLowWord32) \ + V(MipsFloat64ExtractHighWord32) \ + V(MipsFloat64InsertLowWord32) \ + V(MipsFloat64InsertHighWord32) \ + V(MipsFloat64SilenceNaN) \ + V(MipsFloat32Max) \ + V(MipsFloat64Max) \ + V(MipsFloat32Min) \ + V(MipsFloat64Min) \ + V(MipsPush) \ + V(MipsPeek) \ + V(MipsStoreToStackSlot) \ + V(MipsByteSwap32) \ + V(MipsStackClaim) \ + V(MipsSeb) \ + V(MipsSeh) \ + V(MipsSync) \ + V(MipsS128Zero) \ + V(MipsI32x4Splat) \ + V(MipsI32x4ExtractLane) \ + V(MipsI32x4ReplaceLane) \ + V(MipsI32x4Add) \ + V(MipsI32x4Sub) \ + V(MipsF64x2Abs) \ + V(MipsF64x2Neg) \ + V(MipsF64x2Sqrt) \ + V(MipsF64x2Add) \ + V(MipsF64x2Sub) \ + V(MipsF64x2Mul) \ + V(MipsF64x2Div) \ + V(MipsF64x2Min) \ + V(MipsF64x2Max) \ + V(MipsF64x2Eq) \ + V(MipsF64x2Ne) \ + V(MipsF64x2Lt) \ + V(MipsF64x2Le) \ + V(MipsF64x2Pmin) \ + V(MipsF64x2Pmax) \ + V(MipsF64x2Ceil) \ + V(MipsF64x2Floor) \ + V(MipsF64x2Trunc) \ + V(MipsF64x2NearestInt) \ + V(MipsF64x2ConvertLowI32x4S) \ + V(MipsF64x2ConvertLowI32x4U) \ + V(MipsF64x2PromoteLowF32x4) \ + V(MipsI64x2Add) \ + V(MipsI64x2Sub) \ + V(MipsI64x2Mul) \ + V(MipsI64x2Neg) \ + V(MipsI64x2Shl) \ + V(MipsI64x2ShrS) \ + V(MipsI64x2ShrU) \ + V(MipsI64x2BitMask) \ + V(MipsI64x2Eq) \ + V(MipsI64x2Ne) \ + V(MipsI64x2GtS) \ + V(MipsI64x2GeS) \ + V(MipsI64x2Abs) \ + V(MipsI64x2SConvertI32x4Low) \ + V(MipsI64x2SConvertI32x4High) \ + V(MipsI64x2UConvertI32x4Low) \ + V(MipsI64x2UConvertI32x4High) \ + V(MipsI64x2ExtMulLowI32x4S) \ + V(MipsI64x2ExtMulHighI32x4S) \ + V(MipsI64x2ExtMulLowI32x4U) \ + V(MipsI64x2ExtMulHighI32x4U) \ + V(MipsF32x4Splat) \ + V(MipsF32x4ExtractLane) \ + V(MipsF32x4ReplaceLane) \ + V(MipsF32x4SConvertI32x4) \ + 
V(MipsF32x4UConvertI32x4) \ + V(MipsF32x4DemoteF64x2Zero) \ + V(MipsI32x4Mul) \ + V(MipsI32x4MaxS) \ + V(MipsI32x4MinS) \ + V(MipsI32x4Eq) \ + V(MipsI32x4Ne) \ + V(MipsI32x4Shl) \ + V(MipsI32x4ShrS) \ + V(MipsI32x4ShrU) \ + V(MipsI32x4MaxU) \ + V(MipsI32x4MinU) \ + V(MipsF64x2Splat) \ + V(MipsF64x2ExtractLane) \ + V(MipsF64x2ReplaceLane) \ + V(MipsF32x4Abs) \ + V(MipsF32x4Neg) \ + V(MipsF32x4Sqrt) \ + V(MipsF32x4RecipApprox) \ + V(MipsF32x4RecipSqrtApprox) \ + V(MipsF32x4Add) \ + V(MipsF32x4Sub) \ + V(MipsF32x4Mul) \ + V(MipsF32x4Div) \ + V(MipsF32x4Max) \ + V(MipsF32x4Min) \ + V(MipsF32x4Eq) \ + V(MipsF32x4Ne) \ + V(MipsF32x4Lt) \ + V(MipsF32x4Le) \ + V(MipsF32x4Pmin) \ + V(MipsF32x4Pmax) \ + V(MipsF32x4Ceil) \ + V(MipsF32x4Floor) \ + V(MipsF32x4Trunc) \ + V(MipsF32x4NearestInt) \ + V(MipsI32x4SConvertF32x4) \ + V(MipsI32x4UConvertF32x4) \ + V(MipsI32x4Neg) \ + V(MipsI32x4GtS) \ + V(MipsI32x4GeS) \ + V(MipsI32x4GtU) \ + V(MipsI32x4GeU) \ + V(MipsI32x4Abs) \ + V(MipsI32x4BitMask) \ + V(MipsI32x4DotI16x8S) \ + V(MipsI32x4ExtMulLowI16x8S) \ + V(MipsI32x4ExtMulHighI16x8S) \ + V(MipsI32x4ExtMulLowI16x8U) \ + V(MipsI32x4ExtMulHighI16x8U) \ + V(MipsI32x4TruncSatF64x2SZero) \ + V(MipsI32x4TruncSatF64x2UZero) \ + V(MipsI32x4ExtAddPairwiseI16x8S) \ + V(MipsI32x4ExtAddPairwiseI16x8U) \ + V(MipsI16x8Splat) \ + V(MipsI16x8ExtractLaneU) \ + V(MipsI16x8ExtractLaneS) \ + V(MipsI16x8ReplaceLane) \ + V(MipsI16x8Neg) \ + V(MipsI16x8Shl) \ + V(MipsI16x8ShrS) \ + V(MipsI16x8ShrU) \ + V(MipsI16x8Add) \ + V(MipsI16x8AddSatS) \ + V(MipsI16x8Sub) \ + V(MipsI16x8SubSatS) \ + V(MipsI16x8Mul) \ + V(MipsI16x8MaxS) \ + V(MipsI16x8MinS) \ + V(MipsI16x8Eq) \ + V(MipsI16x8Ne) \ + V(MipsI16x8GtS) \ + V(MipsI16x8GeS) \ + V(MipsI16x8AddSatU) \ + V(MipsI16x8SubSatU) \ + V(MipsI16x8MaxU) \ + V(MipsI16x8MinU) \ + V(MipsI16x8GtU) \ + V(MipsI16x8GeU) \ + V(MipsI16x8RoundingAverageU) \ + V(MipsI16x8Abs) \ + V(MipsI16x8BitMask) \ + V(MipsI16x8Q15MulRSatS) \ + V(MipsI16x8ExtMulLowI8x16S) \ + V(MipsI16x8ExtMulHighI8x16S) \ + V(MipsI16x8ExtMulLowI8x16U) \ + V(MipsI16x8ExtMulHighI8x16U) \ + V(MipsI16x8ExtAddPairwiseI8x16S) \ + V(MipsI16x8ExtAddPairwiseI8x16U) \ + V(MipsI8x16Splat) \ + V(MipsI8x16ExtractLaneU) \ + V(MipsI8x16ExtractLaneS) \ + V(MipsI8x16ReplaceLane) \ + V(MipsI8x16Neg) \ + V(MipsI8x16Shl) \ + V(MipsI8x16ShrS) \ + V(MipsI8x16Add) \ + V(MipsI8x16AddSatS) \ + V(MipsI8x16Sub) \ + V(MipsI8x16SubSatS) \ + V(MipsI8x16MaxS) \ + V(MipsI8x16MinS) \ + V(MipsI8x16Eq) \ + V(MipsI8x16Ne) \ + V(MipsI8x16GtS) \ + V(MipsI8x16GeS) \ + V(MipsI8x16ShrU) \ + V(MipsI8x16AddSatU) \ + V(MipsI8x16SubSatU) \ + V(MipsI8x16MaxU) \ + V(MipsI8x16MinU) \ + V(MipsI8x16GtU) \ + V(MipsI8x16GeU) \ + V(MipsI8x16RoundingAverageU) \ + V(MipsI8x16Abs) \ + V(MipsI8x16Popcnt) \ + V(MipsI8x16BitMask) \ + V(MipsS128And) \ + V(MipsS128Or) \ + V(MipsS128Xor) \ + V(MipsS128Not) \ + V(MipsS128Select) \ + V(MipsS128AndNot) \ + V(MipsI64x2AllTrue) \ + V(MipsI32x4AllTrue) \ + V(MipsI16x8AllTrue) \ + V(MipsI8x16AllTrue) \ + V(MipsV128AnyTrue) \ + V(MipsS32x4InterleaveRight) \ + V(MipsS32x4InterleaveLeft) \ + V(MipsS32x4PackEven) \ + V(MipsS32x4PackOdd) \ + V(MipsS32x4InterleaveEven) \ + V(MipsS32x4InterleaveOdd) \ + V(MipsS32x4Shuffle) \ + V(MipsS16x8InterleaveRight) \ + V(MipsS16x8InterleaveLeft) \ + V(MipsS16x8PackEven) \ + V(MipsS16x8PackOdd) \ + V(MipsS16x8InterleaveEven) \ + V(MipsS16x8InterleaveOdd) \ + V(MipsS16x4Reverse) \ + V(MipsS16x2Reverse) \ + V(MipsS8x16InterleaveRight) \ + V(MipsS8x16InterleaveLeft) \ + V(MipsS8x16PackEven) \ + V(MipsS8x16PackOdd) \ + 
V(MipsS8x16InterleaveEven) \ + V(MipsS8x16InterleaveOdd) \ + V(MipsI8x16Shuffle) \ + V(MipsI8x16Swizzle) \ + V(MipsS8x16Concat) \ + V(MipsS8x8Reverse) \ + V(MipsS8x4Reverse) \ + V(MipsS8x2Reverse) \ + V(MipsS128Load8Splat) \ + V(MipsS128Load16Splat) \ + V(MipsS128Load32Splat) \ + V(MipsS128Load64Splat) \ + V(MipsS128Load8x8S) \ + V(MipsS128Load8x8U) \ + V(MipsS128Load16x4S) \ + V(MipsS128Load16x4U) \ + V(MipsS128Load32x2S) \ + V(MipsS128Load32x2U) \ + V(MipsMsaLd) \ + V(MipsMsaSt) \ + V(MipsI32x4SConvertI16x8Low) \ + V(MipsI32x4SConvertI16x8High) \ + V(MipsI32x4UConvertI16x8Low) \ + V(MipsI32x4UConvertI16x8High) \ + V(MipsI16x8SConvertI8x16Low) \ + V(MipsI16x8SConvertI8x16High) \ + V(MipsI16x8SConvertI32x4) \ + V(MipsI16x8UConvertI32x4) \ + V(MipsI16x8UConvertI8x16Low) \ + V(MipsI16x8UConvertI8x16High) \ + V(MipsI8x16SConvertI16x8) \ + V(MipsI8x16UConvertI16x8) \ + V(MipsWord32AtomicPairLoad) \ + V(MipsWord32AtomicPairStore) \ + V(MipsWord32AtomicPairAdd) \ + V(MipsWord32AtomicPairSub) \ + V(MipsWord32AtomicPairAnd) \ + V(MipsWord32AtomicPairOr) \ + V(MipsWord32AtomicPairXor) \ + V(MipsWord32AtomicPairExchange) \ + V(MipsWord32AtomicPairCompareExchange) + +// Addressing modes represent the "shape" of inputs to an instruction. +// Many instructions support multiple addressing modes. Addressing modes +// are encoded into the InstructionCode of the instruction and tell the +// code generator after register allocation which assembler method to call. +// +// We use the following local notation for addressing modes: +// +// R = register +// O = register or stack slot +// D = double register +// I = immediate (handle, external, int32) +// MRI = [register + immediate] +// MRR = [register + register] +// TODO(plind): Add the new r6 address modes. +#define TARGET_ADDRESSING_MODE_LIST(V) \ + V(MRI) /* [%r0 + K] */ \ + V(MRR) /* [%r0 + %r1] */ + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_ diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc new file mode 100644 index 00000000000000..d59392b40a3f08 --- /dev/null +++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc @@ -0,0 +1,1806 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/backend/code-generator.h" +#include "src/compiler/backend/instruction-scheduler.h" + +namespace v8 { +namespace internal { +namespace compiler { + +bool InstructionScheduler::SchedulerSupported() { return true; } + +int InstructionScheduler::GetTargetInstructionFlags( + const Instruction* instr) const { + switch (instr->arch_opcode()) { + case kMipsAbsD: + case kMipsAbsS: + case kMipsAdd: + case kMipsAddD: + case kMipsAddOvf: + case kMipsAddPair: + case kMipsAddS: + case kMipsAnd: + case kMipsByteSwap32: + case kMipsCeilWD: + case kMipsCeilWS: + case kMipsClz: + case kMipsCmp: + case kMipsCmpD: + case kMipsCmpS: + case kMipsCtz: + case kMipsCvtDS: + case kMipsCvtDUw: + case kMipsCvtDW: + case kMipsCvtSD: + case kMipsCvtSUw: + case kMipsCvtSW: + case kMipsDiv: + case kMipsDivD: + case kMipsDivS: + case kMipsDivU: + case kMipsExt: + case kMipsF64x2Abs: + case kMipsF64x2Neg: + case kMipsF64x2Sqrt: + case kMipsF64x2Add: + case kMipsF64x2Sub: + case kMipsF64x2Mul: + case kMipsF64x2Div: + case kMipsF64x2Min: + case kMipsF64x2Max: + case kMipsF64x2Eq: + case kMipsF64x2Ne: + case kMipsF64x2Lt: + case kMipsF64x2Le: + case kMipsF64x2Splat: + case kMipsF64x2ExtractLane: + case kMipsF64x2ReplaceLane: + case kMipsF64x2Pmin: + case kMipsF64x2Pmax: + case kMipsF64x2Ceil: + case kMipsF64x2Floor: + case kMipsF64x2Trunc: + case kMipsF64x2NearestInt: + case kMipsF64x2ConvertLowI32x4S: + case kMipsF64x2ConvertLowI32x4U: + case kMipsF64x2PromoteLowF32x4: + case kMipsI64x2Add: + case kMipsI64x2Sub: + case kMipsI64x2Mul: + case kMipsI64x2Neg: + case kMipsI64x2Shl: + case kMipsI64x2ShrS: + case kMipsI64x2ShrU: + case kMipsI64x2BitMask: + case kMipsI64x2Eq: + case kMipsI64x2Ne: + case kMipsI64x2GtS: + case kMipsI64x2GeS: + case kMipsI64x2Abs: + case kMipsI64x2SConvertI32x4Low: + case kMipsI64x2SConvertI32x4High: + case kMipsI64x2UConvertI32x4Low: + case kMipsI64x2UConvertI32x4High: + case kMipsI64x2ExtMulLowI32x4S: + case kMipsI64x2ExtMulHighI32x4S: + case kMipsI64x2ExtMulLowI32x4U: + case kMipsI64x2ExtMulHighI32x4U: + case kMipsF32x4Abs: + case kMipsF32x4Add: + case kMipsF32x4Eq: + case kMipsF32x4ExtractLane: + case kMipsF32x4Le: + case kMipsF32x4Lt: + case kMipsF32x4Max: + case kMipsF32x4Min: + case kMipsF32x4Mul: + case kMipsF32x4Div: + case kMipsF32x4Ne: + case kMipsF32x4Neg: + case kMipsF32x4Sqrt: + case kMipsF32x4RecipApprox: + case kMipsF32x4RecipSqrtApprox: + case kMipsF32x4ReplaceLane: + case kMipsF32x4SConvertI32x4: + case kMipsF32x4Splat: + case kMipsF32x4Sub: + case kMipsF32x4UConvertI32x4: + case kMipsF32x4Pmin: + case kMipsF32x4Pmax: + case kMipsF32x4Ceil: + case kMipsF32x4Floor: + case kMipsF32x4Trunc: + case kMipsF32x4NearestInt: + case kMipsF32x4DemoteF64x2Zero: + case kMipsFloat32Max: + case kMipsFloat32Min: + case kMipsFloat32RoundDown: + case kMipsFloat32RoundTiesEven: + case kMipsFloat32RoundTruncate: + case kMipsFloat32RoundUp: + case kMipsFloat64ExtractHighWord32: + case kMipsFloat64ExtractLowWord32: + case kMipsFloat64InsertHighWord32: + case kMipsFloat64InsertLowWord32: + case kMipsFloat64Max: + case kMipsFloat64Min: + case kMipsFloat64RoundDown: + case kMipsFloat64RoundTiesEven: + case kMipsFloat64RoundTruncate: + case kMipsFloat64RoundUp: + case kMipsFloat64SilenceNaN: + case kMipsFloorWD: + case kMipsFloorWS: + case kMipsI16x8Add: + case kMipsI16x8AddSatS: + case kMipsI16x8AddSatU: + case kMipsI16x8Eq: + case kMipsI16x8ExtractLaneU: + case kMipsI16x8ExtractLaneS: + case kMipsI16x8GeS: + case kMipsI16x8GeU: + case kMipsI16x8RoundingAverageU: + case 
kMipsI16x8GtS: + case kMipsI16x8GtU: + case kMipsI16x8MaxS: + case kMipsI16x8MaxU: + case kMipsI16x8MinS: + case kMipsI16x8MinU: + case kMipsI16x8Mul: + case kMipsI16x8Ne: + case kMipsI16x8Neg: + case kMipsI16x8ReplaceLane: + case kMipsI16x8SConvertI32x4: + case kMipsI16x8SConvertI8x16High: + case kMipsI16x8SConvertI8x16Low: + case kMipsI16x8Shl: + case kMipsI16x8ShrS: + case kMipsI16x8ShrU: + case kMipsI16x8Splat: + case kMipsI16x8Sub: + case kMipsI16x8SubSatS: + case kMipsI16x8SubSatU: + case kMipsI16x8UConvertI32x4: + case kMipsI16x8UConvertI8x16High: + case kMipsI16x8UConvertI8x16Low: + case kMipsI16x8Abs: + case kMipsI16x8BitMask: + case kMipsI16x8Q15MulRSatS: + case kMipsI16x8ExtMulLowI8x16S: + case kMipsI16x8ExtMulHighI8x16S: + case kMipsI16x8ExtMulLowI8x16U: + case kMipsI16x8ExtMulHighI8x16U: + case kMipsI16x8ExtAddPairwiseI8x16S: + case kMipsI16x8ExtAddPairwiseI8x16U: + case kMipsI32x4ExtAddPairwiseI16x8S: + case kMipsI32x4ExtAddPairwiseI16x8U: + case kMipsI32x4Add: + case kMipsI32x4Eq: + case kMipsI32x4ExtractLane: + case kMipsI32x4GeS: + case kMipsI32x4GeU: + case kMipsI32x4GtS: + case kMipsI32x4GtU: + case kMipsI32x4MaxS: + case kMipsI32x4MaxU: + case kMipsI32x4MinS: + case kMipsI32x4MinU: + case kMipsI32x4Mul: + case kMipsI32x4Ne: + case kMipsI32x4Neg: + case kMipsI32x4ReplaceLane: + case kMipsI32x4SConvertF32x4: + case kMipsI32x4SConvertI16x8High: + case kMipsI32x4SConvertI16x8Low: + case kMipsI32x4Shl: + case kMipsI32x4ShrS: + case kMipsI32x4ShrU: + case kMipsI32x4Splat: + case kMipsI32x4Sub: + case kMipsI32x4UConvertF32x4: + case kMipsI32x4UConvertI16x8High: + case kMipsI32x4UConvertI16x8Low: + case kMipsI32x4Abs: + case kMipsI32x4BitMask: + case kMipsI32x4DotI16x8S: + case kMipsI32x4ExtMulLowI16x8S: + case kMipsI32x4ExtMulHighI16x8S: + case kMipsI32x4ExtMulLowI16x8U: + case kMipsI32x4ExtMulHighI16x8U: + case kMipsI32x4TruncSatF64x2SZero: + case kMipsI32x4TruncSatF64x2UZero: + case kMipsI8x16Add: + case kMipsI8x16AddSatS: + case kMipsI8x16AddSatU: + case kMipsI8x16Eq: + case kMipsI8x16ExtractLaneU: + case kMipsI8x16ExtractLaneS: + case kMipsI8x16GeS: + case kMipsI8x16GeU: + case kMipsI8x16RoundingAverageU: + case kMipsI8x16GtS: + case kMipsI8x16GtU: + case kMipsI8x16MaxS: + case kMipsI8x16MaxU: + case kMipsI8x16MinS: + case kMipsI8x16MinU: + case kMipsI8x16Ne: + case kMipsI8x16Neg: + case kMipsI8x16ReplaceLane: + case kMipsI8x16SConvertI16x8: + case kMipsI8x16Shl: + case kMipsI8x16ShrS: + case kMipsI8x16ShrU: + case kMipsI8x16Splat: + case kMipsI8x16Sub: + case kMipsI8x16SubSatS: + case kMipsI8x16SubSatU: + case kMipsI8x16UConvertI16x8: + case kMipsI8x16Abs: + case kMipsI8x16Popcnt: + case kMipsI8x16BitMask: + case kMipsIns: + case kMipsLsa: + case kMipsMaddD: + case kMipsMaddS: + case kMipsMaxD: + case kMipsMaxS: + case kMipsMinD: + case kMipsMinS: + case kMipsMod: + case kMipsModU: + case kMipsMov: + case kMipsMsubD: + case kMipsMsubS: + case kMipsMul: + case kMipsMulD: + case kMipsMulHigh: + case kMipsMulHighU: + case kMipsMulOvf: + case kMipsMulPair: + case kMipsMulS: + case kMipsNegD: + case kMipsNegS: + case kMipsNor: + case kMipsOr: + case kMipsPopcnt: + case kMipsRor: + case kMipsRoundWD: + case kMipsRoundWS: + case kMipsS128And: + case kMipsS128Not: + case kMipsS128Or: + case kMipsS128Select: + case kMipsS128Xor: + case kMipsS128Zero: + case kMipsS128AndNot: + case kMipsS16x2Reverse: + case kMipsS16x4Reverse: + case kMipsS16x8InterleaveEven: + case kMipsS16x8InterleaveLeft: + case kMipsS16x8InterleaveOdd: + case kMipsS16x8InterleaveRight: + case kMipsS16x8PackEven: 
+ case kMipsS16x8PackOdd: + case kMipsI64x2AllTrue: + case kMipsI32x4AllTrue: + case kMipsI16x8AllTrue: + case kMipsI8x16AllTrue: + case kMipsV128AnyTrue: + case kMipsS32x4InterleaveEven: + case kMipsS32x4InterleaveLeft: + case kMipsS32x4InterleaveOdd: + case kMipsS32x4InterleaveRight: + case kMipsS32x4PackEven: + case kMipsS32x4PackOdd: + case kMipsS32x4Shuffle: + case kMipsS8x16Concat: + case kMipsS8x16InterleaveEven: + case kMipsS8x16InterleaveLeft: + case kMipsS8x16InterleaveOdd: + case kMipsS8x16InterleaveRight: + case kMipsS8x16PackEven: + case kMipsS8x16PackOdd: + case kMipsI8x16Shuffle: + case kMipsI8x16Swizzle: + case kMipsS8x2Reverse: + case kMipsS8x4Reverse: + case kMipsS8x8Reverse: + case kMipsSar: + case kMipsSarPair: + case kMipsSeb: + case kMipsSeh: + case kMipsShl: + case kMipsShlPair: + case kMipsShr: + case kMipsShrPair: + case kMipsSqrtD: + case kMipsSqrtS: + case kMipsSub: + case kMipsSubD: + case kMipsSubOvf: + case kMipsSubPair: + case kMipsSubS: + case kMipsTruncUwD: + case kMipsTruncUwS: + case kMipsTruncWD: + case kMipsTruncWS: + case kMipsTst: + case kMipsXor: + return kNoOpcodeFlags; + + case kMipsLb: + case kMipsLbu: + case kMipsLdc1: + case kMipsLh: + case kMipsLhu: + case kMipsLw: + case kMipsLwc1: + case kMipsMsaLd: + case kMipsPeek: + case kMipsUldc1: + case kMipsUlh: + case kMipsUlhu: + case kMipsUlw: + case kMipsUlwc1: + case kMipsS128Load8Splat: + case kMipsS128Load16Splat: + case kMipsS128Load32Splat: + case kMipsS128Load64Splat: + case kMipsS128Load8x8S: + case kMipsS128Load8x8U: + case kMipsS128Load16x4S: + case kMipsS128Load16x4U: + case kMipsS128Load32x2S: + case kMipsS128Load32x2U: + case kMipsWord32AtomicPairLoad: + return kIsLoadOperation; + + case kMipsModD: + case kMipsMsaSt: + case kMipsPush: + case kMipsSb: + case kMipsSdc1: + case kMipsSh: + case kMipsStackClaim: + case kMipsStoreToStackSlot: + case kMipsSw: + case kMipsSwc1: + case kMipsUsdc1: + case kMipsUsh: + case kMipsUsw: + case kMipsUswc1: + case kMipsSync: + case kMipsWord32AtomicPairStore: + case kMipsWord32AtomicPairAdd: + case kMipsWord32AtomicPairSub: + case kMipsWord32AtomicPairAnd: + case kMipsWord32AtomicPairOr: + case kMipsWord32AtomicPairXor: + case kMipsWord32AtomicPairExchange: + case kMipsWord32AtomicPairCompareExchange: + return kHasSideEffect; + +#define CASE(Name) case k##Name: + COMMON_ARCH_OPCODE_LIST(CASE) +#undef CASE + // Already covered in architecture independent code. + UNREACHABLE(); + } + + UNREACHABLE(); +} + +enum Latency { + BRANCH = 4, // Estimated max. + RINT_S = 4, // Estimated. + RINT_D = 4, // Estimated. + + MULT = 4, + MULTU = 4, + MADD = 4, + MADDU = 4, + MSUB = 4, + MSUBU = 4, + + MUL = 7, + MULU = 7, + MUH = 7, + MUHU = 7, + + DIV = 50, // Min:11 Max:50 + DIVU = 50, + + ABS_S = 4, + ABS_D = 4, + NEG_S = 4, + NEG_D = 4, + ADD_S = 4, + ADD_D = 4, + SUB_S = 4, + SUB_D = 4, + MAX_S = 4, // Estimated. + MAX_D = 4, // Estimated. 
+ C_cond_S = 4, + C_cond_D = 4, + MUL_S = 4, + + MADD_S = 4, + MSUB_S = 4, + NMADD_S = 4, + NMSUB_S = 4, + + CABS_cond_S = 4, + CABS_cond_D = 4, + + CVT_D_S = 4, + CVT_PS_PW = 4, + + CVT_S_W = 4, + CVT_S_L = 4, + CVT_D_W = 4, + CVT_D_L = 4, + + CVT_S_D = 4, + + CVT_W_S = 4, + CVT_W_D = 4, + CVT_L_S = 4, + CVT_L_D = 4, + + CEIL_W_S = 4, + CEIL_W_D = 4, + CEIL_L_S = 4, + CEIL_L_D = 4, + + FLOOR_W_S = 4, + FLOOR_W_D = 4, + FLOOR_L_S = 4, + FLOOR_L_D = 4, + + ROUND_W_S = 4, + ROUND_W_D = 4, + ROUND_L_S = 4, + ROUND_L_D = 4, + + TRUNC_W_S = 4, + TRUNC_W_D = 4, + TRUNC_L_S = 4, + TRUNC_L_D = 4, + + MOV_S = 4, + MOV_D = 4, + + MOVF_S = 4, + MOVF_D = 4, + + MOVN_S = 4, + MOVN_D = 4, + + MOVT_S = 4, + MOVT_D = 4, + + MOVZ_S = 4, + MOVZ_D = 4, + + MUL_D = 5, + MADD_D = 5, + MSUB_D = 5, + NMADD_D = 5, + NMSUB_D = 5, + + RECIP_S = 13, + RECIP_D = 26, + + RSQRT_S = 17, + RSQRT_D = 36, + + DIV_S = 17, + SQRT_S = 17, + + DIV_D = 32, + SQRT_D = 32, + + MTC1 = 4, + MTHC1 = 4, + DMTC1 = 4, + LWC1 = 4, + LDC1 = 4, + LDXC1 = 4, + LUXC1 = 4, + LWXC1 = 4, + + MFC1 = 1, + MFHC1 = 1, + MFHI = 1, + MFLO = 1, + DMFC1 = 1, + SWC1 = 1, + SDC1 = 1, + SDXC1 = 1, + SUXC1 = 1, + SWXC1 = 1, +}; + +int ClzLatency() { + if (IsMipsArchVariant(kLoongson)) { + return (6 + 2 * Latency::BRANCH); + } else { + return 1; + } +} + +int RorLatency(bool is_operand_register = true) { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + return 1; + } else { + if (is_operand_register) { + return 4; + } else { + return 3; // Estimated max. + } + } +} + +int AdduLatency(bool is_operand_register = true) { + if (is_operand_register) { + return 1; + } else { + return 2; // Estimated max. + } +} + +int XorLatency(bool is_operand_register = true) { + return AdduLatency(is_operand_register); +} + +int AndLatency(bool is_operand_register = true) { + return AdduLatency(is_operand_register); +} + +int OrLatency(bool is_operand_register = true) { + return AdduLatency(is_operand_register); +} + +int SubuLatency(bool is_operand_register = true) { + return AdduLatency(is_operand_register); +} + +int MulLatency(bool is_operand_register = true) { + if (is_operand_register) { + if (IsMipsArchVariant(kLoongson)) { + return Latency::MULT + 1; + } else { + return Latency::MUL + 1; + } + } else { + if (IsMipsArchVariant(kLoongson)) { + return Latency::MULT + 2; + } else { + return Latency::MUL + 2; + } + } +} + +int NorLatency(bool is_operand_register = true) { + if (is_operand_register) { + return 1; + } else { + return 2; + } +} + +int InsLatency() { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + return 1; + } else { + return SubuLatency(false) + 7; + } +} + +int ShlPairLatency(bool is_operand_register = true) { + if (is_operand_register) { + int latency = + AndLatency(false) + NorLatency() + OrLatency() + AndLatency(false) + 4; + if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { + return latency + Latency::BRANCH + 2; + } else { + return latency + 2; + } + } else { + return 2; + } +} + +int ShrPairLatency(bool is_operand_register = true, uint32_t shift = 0) { + if (is_operand_register) { + int latency = + AndLatency(false) + NorLatency() + OrLatency() + AndLatency(false) + 4; + if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { + return latency + Latency::BRANCH + 2; + } else { + return latency + 2; + } + } else { + // Estimated max. + return (InsLatency() + 2 > OrLatency() + 3) ? 
InsLatency() + 2 + : OrLatency() + 3; + } +} + +int SarPairLatency(bool is_operand_register = true, uint32_t shift = 0) { + if (is_operand_register) { + return AndLatency(false) + NorLatency() + OrLatency() + AndLatency(false) + + Latency::BRANCH + 6; + } else { + shift = shift & 0x3F; + if (shift == 0) { + return 2; + } else if (shift < 32) { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + return InsLatency() + 2; + } else { + return OrLatency() + 3; + } + } else if (shift == 32) { + return 2; + } else { + return 2; + } + } +} + +int ExtLatency() { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + return 1; + } else { + // Estimated max. + return 2; + } +} + +int LsaLatency() { + // Estimated max. + return AdduLatency() + 1; +} + +int SltLatency(bool is_operand_register = true) { + if (is_operand_register) { + return 1; + } else { + return 2; // Estimated max. + } +} + +int SltuLatency(bool is_operand_register = true) { + return SltLatency(is_operand_register); +} + +int AddPairLatency() { return 3 * AdduLatency() + SltLatency(); } + +int SubPairLatency() { return SltuLatency() + 3 * SubuLatency(); } + +int MuluLatency(bool is_operand_register = true) { + int latency = 0; + if (!is_operand_register) latency++; + if (!IsMipsArchVariant(kMips32r6)) { + return latency + Latency::MULTU + 2; + } else { + return latency + Latency::MULU + Latency::MUHU; + } +} + +int MulPairLatency() { + return MuluLatency() + 2 * MulLatency() + 2 * AdduLatency(); +} + +int MaddSLatency() { + if (IsMipsArchVariant(kMips32r2)) { + return Latency::MADD_D; + } else { + return Latency::MUL_D + Latency::ADD_D; + } +} + +int MaddDLatency() { + if (IsMipsArchVariant(kMips32r2)) { + return Latency::MADD_D; + } else { + return Latency::MUL_D + Latency::ADD_D; + } +} + +int MsubSLatency() { + if (IsMipsArchVariant(kMips32r2)) { + return Latency::MSUB_S; + } else { + return Latency::MUL_S + Latency::SUB_S; + } +} + +int MsubDLatency() { + if (IsMipsArchVariant(kMips32r2)) { + return Latency::MSUB_D; + } else { + return Latency::MUL_D + Latency::SUB_D; + } +} + +int Mfhc1Latency() { + if (IsFp32Mode()) { + return Latency::MFC1; + } else { + return 1; + } +} + +int Mthc1Latency() { + if (IsFp32Mode()) { + return Latency::MTC1; + } else { + return 1; + } +} + +int MoveLatency(bool is_double_register = true) { + if (!is_double_register) { + return Latency::MTC1 + 1; + } else { + return Mthc1Latency() + 1; // Estimated. + } +} + +int Float64RoundLatency() { + if (IsMipsArchVariant(kMips32r6)) { + return Latency::RINT_D + 4; + } else { + // For ceil_l_d, floor_l_d, round_l_d, trunc_l_d latency is 4. + return Mfhc1Latency() + ExtLatency() + Latency::BRANCH + Latency::MOV_D + + 4 + MoveLatency() + 1 + Latency::BRANCH + Latency::CVT_D_L; + } +} + +int Float32RoundLatency() { + if (IsMipsArchVariant(kMips32r6)) { + return Latency::RINT_S + 4; + } else { + // For ceil_w_s, floor_w_s, round_w_s, trunc_w_s latency is 4. 
+ return Latency::MFC1 + ExtLatency() + Latency::BRANCH + Latency::MOV_S + 4 + + Latency::MFC1 + Latency::BRANCH + Latency::CVT_S_W; + } +} + +int CvtDUwLatency() { + if (IsFp64Mode()) { + return Latency::MTC1 + Mthc1Latency() + Latency::CVT_D_L; + } else { + return Latency::BRANCH + Latency::MTC1 + 1 + Latency::MTC1 + + Mthc1Latency() + Latency::CVT_D_W + Latency::BRANCH + + Latency::ADD_D + Latency::CVT_D_W; + } +} + +int CvtSUwLatency() { return CvtDUwLatency() + Latency::CVT_S_D; } + +int Floor_w_dLatency() { + if (IsMipsArchVariant(kLoongson)) { + return Mfhc1Latency() + Latency::FLOOR_W_D + Mthc1Latency(); + } else { + return Latency::FLOOR_W_D; + } +} + +int FloorWDLatency() { return Floor_w_dLatency() + Latency::MFC1; } + +int Ceil_w_dLatency() { + if (IsMipsArchVariant(kLoongson)) { + return Mfhc1Latency() + Latency::CEIL_W_D + Mthc1Latency(); + } else { + return Latency::CEIL_W_D; + } +} + +int CeilWDLatency() { return Ceil_w_dLatency() + Latency::MFC1; } + +int Round_w_dLatency() { + if (IsMipsArchVariant(kLoongson)) { + return Mfhc1Latency() + Latency::ROUND_W_D + Mthc1Latency(); + } else { + return Latency::ROUND_W_D; + } +} + +int RoundWDLatency() { return Round_w_dLatency() + Latency::MFC1; } + +int Trunc_w_dLatency() { + if (IsMipsArchVariant(kLoongson)) { + return Mfhc1Latency() + Latency::TRUNC_W_D + Mthc1Latency(); + } else { + return Latency::TRUNC_W_D; + } +} + +int MovnLatency() { + if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { + return Latency::BRANCH + 1; + } else { + return 1; + } +} + +int Trunc_uw_dLatency() { + return 1 + Latency::MTC1 + Mthc1Latency() + Latency::BRANCH + Latency::SUB_D + + Latency::TRUNC_W_D + Latency::MFC1 + OrLatency(false) + + Latency::BRANCH + Latency::TRUNC_W_D + Latency::MFC1; +} + +int Trunc_uw_sLatency() { + return 1 + Latency::MTC1 + Latency::BRANCH + Latency::SUB_S + + Latency::TRUNC_W_S + Latency::MFC1 + OrLatency(false) + + Latency::TRUNC_W_S + Latency::MFC1; +} + +int MovzLatency() { + if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { + return Latency::BRANCH + 1; + } else { + return 1; + } +} + +int FmoveLowLatency() { + if (IsFp32Mode()) { + return Latency::MTC1; + } else { + return Latency::MFHC1 + Latency::MTC1 + Latency::MTHC1; + } +} + +int SebLatency() { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + return 1; + } else { + return 2; + } +} + +int SehLatency() { + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + return 1; + } else { + return 2; + } +} + +int UlhuLatency() { + if (IsMipsArchVariant(kMips32r6)) { + return 1; + } else { + return 4; + } +} + +int UlhLatency() { + if (IsMipsArchVariant(kMips32r6)) { + return 1; + } else { + return 4; + } +} + +int AdjustBaseAndOffsetLatency() { + return 3; // Estimated max. +} + +int UshLatency() { + if (IsMipsArchVariant(kMips32r6)) { + return 1; + } else { + return AdjustBaseAndOffsetLatency() + 4; // Estimated max. + } +} + +int UlwLatency() { + if (IsMipsArchVariant(kMips32r6)) { + return 1; + } else { + return AdjustBaseAndOffsetLatency() + 3; // Estimated max. 
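// Note (illustrative, not from the upstream source): the unaligned-access
// helpers above model the pre-r6 expansion, where an unaligned word access is
// typically synthesized from a left/right pair (lwl/lwr, swl/swr) plus address
// adjustment, while MIPS32r6 handles unaligned accesses in a single
// instruction. As a rough worked example on a pre-r6 core:
//   UlwLatency()   = AdjustBaseAndOffsetLatency() + 3 = 3 + 3 = 6
//   Uldc1Latency() = 2 * UlwLatency() + Latency::MTC1 + Mthc1Latency()
//                  = 12 + 4 + 1 = 17   (FP64 mode; see Uldc1Latency below)
// These are scheduling estimates only, not exact cycle counts.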
+ } +} + +int UswLatency() { + if (IsMipsArchVariant(kMips32r6)) { + return 1; + } else { + return AdjustBaseAndOffsetLatency() + 2; + } +} + +int Ulwc1Latency() { + if (IsMipsArchVariant(kMips32r6)) { + return Latency::LWC1; + } else { + return UlwLatency() + Latency::MTC1; + } +} + +int Uswc1Latency() { + if (IsMipsArchVariant(kMips32r6)) { + return Latency::SWC1; + } else { + return Latency::MFC1 + UswLatency(); + } +} + +int Ldc1Latency() { + int latency = AdjustBaseAndOffsetLatency() + Latency::LWC1; + if (IsFp32Mode()) { + return latency + Latency::LWC1; + } else { + return latency + 1 + Mthc1Latency(); + } +} + +int Uldc1Latency() { + if (IsMipsArchVariant(kMips32r6)) { + return Ldc1Latency(); + } else { + return 2 * UlwLatency() + Latency::MTC1 + Mthc1Latency(); + } +} + +int Sdc1Latency() { + int latency = AdjustBaseAndOffsetLatency() + Latency::SWC1; + if (IsFp32Mode()) { + return latency + Latency::SWC1; + } else { + return latency + Mfhc1Latency() + 1; + } +} + +int Usdc1Latency() { + if (IsMipsArchVariant(kMips32r6)) { + return Sdc1Latency(); + } else { + return Latency::MFC1 + 2 * UswLatency() + Mfhc1Latency(); + } +} + +int PushRegisterLatency() { return AdduLatency(false) + 1; } + +int ByteSwapSignedLatency() { + // operand_size == 4 + if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { + return 2; + } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) { + return 10; + } +} + +int LlLatency(int offset) { + bool is_one_instruction = + IsMipsArchVariant(kMips32r6) ? is_int9(offset) : is_int16(offset); + if (is_one_instruction) { + return 1; + } else { + return 3; + } +} + +int ExtractBitsLatency(int size, bool sign_extend) { + int latency = 1 + ExtLatency(); + if (size == 8) { + if (sign_extend) { + return latency + SebLatency(); + } else { + return 0; + } + } else if (size == 16) { + if (sign_extend) { + return latency + SehLatency(); + } else { + return 0; + } + } else { + UNREACHABLE(); + } +} + +int NegLatency() { return 1; } + +int InsertBitsLatency() { + return RorLatency() + InsLatency() + SubuLatency(false) + NegLatency() + + RorLatency(); +} + +int ScLatency(int offset) { + bool is_one_instruction = + IsMipsArchVariant(kMips32r6) ? is_int9(offset) : is_int16(offset); + if (is_one_instruction) { + return 1; + } else { + return 3; + } +} + +int BranchShortHelperR6Latency() { + return 2; // Estimated max. +} + +int BranchShortHelperLatency() { + return SltLatency() + 2; // Estimated max. +} + +int BranchShortLatency(BranchDelaySlot bdslot = PROTECT) { + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { + return BranchShortHelperR6Latency(); + } else { + return BranchShortHelperLatency(); + } +} + +int Word32AtomicExchangeLatency(bool sign_extend, int size) { + return AdduLatency() + 1 + SubuLatency() + 2 + LlLatency(0) + + ExtractBitsLatency(size, sign_extend) + InsertBitsLatency() + + ScLatency(0) + BranchShortLatency() + 1; +} + +int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) { + return AdduLatency() + 1 + SubuLatency() + 2 + LlLatency(0) + + ExtractBitsLatency(size, sign_extend) + BranchShortLatency() + 1; +} + +int AddOverflowLatency() { + return 6; // Estimated max. +} + +int SubOverflowLatency() { + return 6; // Estimated max. 
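// Note (illustrative, not from the upstream source): the Word32Atomic*Latency
// helpers above approximate the cost of the LL/SC retry loop emitted for
// word-sized atomics, roughly:
//
//   retry:  ll    t0, 0(addr)       // linked load          -> LlLatency()
//           <extract/insert bits>   //                      -> Extract/InsertBitsLatency()
//           sc    t1, 0(addr)       // store-conditional    -> ScLatency()
//           beqz  t1, retry         // retry if SC failed   -> BranchShortLatency()
//
// The formulas simply sum these pieces and assume a single iteration.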
+} + +int MulhLatency(bool is_operand_register = true) { + if (is_operand_register) { + if (!IsMipsArchVariant(kMips32r6)) { + return Latency::MULT + Latency::MFHI; + } else { + return Latency::MUH; + } + } else { + if (!IsMipsArchVariant(kMips32r6)) { + return 1 + Latency::MULT + Latency::MFHI; + } else { + return 1 + Latency::MUH; + } + } +} + +int MulhuLatency(bool is_operand_register = true) { + if (is_operand_register) { + if (!IsMipsArchVariant(kMips32r6)) { + return Latency::MULTU + Latency::MFHI; + } else { + return Latency::MUHU; + } + } else { + if (!IsMipsArchVariant(kMips32r6)) { + return 1 + Latency::MULTU + Latency::MFHI; + } else { + return 1 + Latency::MUHU; + } + } +} + +int MulOverflowLatency() { + return MulLatency() + 4; // Estimated max. +} + +int ModLatency(bool is_operand_register = true) { + if (is_operand_register) { + if (!IsMipsArchVariant(kMips32r6)) { + return Latency::DIV + Latency::MFHI; + } else { + return 1; + } + } else { + if (!IsMipsArchVariant(kMips32r6)) { + return 1 + Latency::DIV + Latency::MFHI; + } else { + return 2; + } + } +} + +int ModuLatency(bool is_operand_register = true) { + return ModLatency(is_operand_register); +} + +int DivLatency(bool is_operand_register = true) { + if (is_operand_register) { + if (!IsMipsArchVariant(kMips32r6)) { + return Latency::DIV + Latency::MFLO; + } else { + return Latency::DIV; + } + } else { + if (!IsMipsArchVariant(kMips32r6)) { + return 1 + Latency::DIV + Latency::MFLO; + } else { + return 1 + Latency::DIV; + } + } +} + +int DivuLatency(bool is_operand_register = true) { + if (is_operand_register) { + if (!IsMipsArchVariant(kMips32r6)) { + return Latency::DIVU + Latency::MFLO; + } else { + return Latency::DIVU; + } + } else { + if (!IsMipsArchVariant(kMips32r6)) { + return 1 + Latency::DIVU + Latency::MFLO; + } else { + return 1 + Latency::DIVU; + } + } +} + +int CtzLatency() { + if (IsMipsArchVariant(kMips32r6)) { + return RorLatency(false) + 2 + ClzLatency(); + } else { + return AdduLatency(false) + XorLatency() + AndLatency() + ClzLatency() + 1 + + SubuLatency(); + } +} + +int PopcntLatency() { + return 4 * AndLatency() + SubuLatency() + 2 * AdduLatency() + MulLatency() + + 8; +} + +int CompareFLatency() { return Latency::C_cond_S; } + +int CompareIsNanFLatency() { return CompareFLatency(); } + +int CompareIsNanF32Latency() { return CompareIsNanFLatency(); } + +int Neg_sLatency() { + if (IsMipsArchVariant(kMips32r6)) { + return Latency::NEG_S; + } else { + // Estimated. + return CompareIsNanF32Latency() + 2 * Latency::BRANCH + Latency::NEG_S + + Latency::MFC1 + 1 + XorLatency() + Latency::MTC1; + } +} + +int CompareIsNanF64Latency() { return CompareIsNanFLatency(); } + +int Neg_dLatency() { + if (IsMipsArchVariant(kMips32r6)) { + return Latency::NEG_D; + } else { + // Estimated. + return CompareIsNanF64Latency() + 2 * Latency::BRANCH + Latency::NEG_D + + Mfhc1Latency() + 1 + XorLatency() + Mthc1Latency(); + } +} + +int CompareF32Latency() { return CompareFLatency(); } + +int Move_sLatency() { + return Latency::MOV_S; // Estimated max. +} + +int Float32MaxLatency() { + // Estimated max. + int latency = CompareIsNanF32Latency() + Latency::BRANCH; + if (IsMipsArchVariant(kMips32r6)) { + return latency + Latency::MAX_S; + } else { + return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() + + Latency::MFC1 + Move_sLatency(); + } +} + +int CompareF64Latency() { return CompareF32Latency(); } + +int Move_dLatency() { + return Latency::MOV_D; // Estimated max. 
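// Note (illustrative, not from the upstream source): Float64MaxLatency()
// below mirrors the two code paths the macro-assembler can take:
//   - MIPS32r6 has a fused max.d, so the estimate is one MAX_D plus the
//     NaN check;
//   - older variants expand max/min into a compare-and-branch ladder, which
//     is why that estimate sums several BRANCH and CompareF64Latency() terms.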
+} + +int Float64MaxLatency() { + // Estimated max. + int latency = CompareIsNanF64Latency() + Latency::BRANCH; + if (IsMipsArchVariant(kMips32r6)) { + return latency + Latency::MAX_D; + } else { + return latency + 5 * Latency::BRANCH + 2 * CompareF64Latency() + + Latency::MFHC1 + 2 * Move_dLatency(); + } +} + +int PrepareCallCFunctionLatency() { + int frame_alignment = TurboAssembler::ActivationFrameAlignment(); + if (frame_alignment > kSystemPointerSize) { + return 1 + SubuLatency(false) + AndLatency(false) + 1; + } else { + return SubuLatency(false); + } +} + +int MovToFloatParametersLatency() { return 2 * MoveLatency(); } + +int CallLatency() { + // Estimated. + return AdduLatency(false) + Latency::BRANCH + 3; +} + +int CallCFunctionHelperLatency() { + // Estimated. + int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency(); + if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) { + latency++; + } else { + latency += AdduLatency(false); + } + return latency; +} + +int CallCFunctionLatency() { return 1 + CallCFunctionHelperLatency(); } + +int MovFromFloatResultLatency() { return MoveLatency(); } + +int Float32MinLatency() { + // Estimated max. + return CompareIsNanF32Latency() + Latency::BRANCH + + 2 * (CompareF32Latency() + Latency::BRANCH) + Latency::MFC1 + + 2 * Latency::BRANCH + Move_sLatency(); +} + +int Float64MinLatency() { + // Estimated max. + return CompareIsNanF64Latency() + Latency::BRANCH + + 2 * (CompareF64Latency() + Latency::BRANCH) + Mfhc1Latency() + + 2 * Latency::BRANCH + Move_dLatency(); +} + +int SmiUntagLatency() { return 1; } + +int PrepareForTailCallLatency() { + // Estimated max. + return 2 * (LsaLatency() + AdduLatency(false)) + 2 + Latency::BRANCH + + Latency::BRANCH + 2 * SubuLatency(false) + 2 + Latency::BRANCH + 1; +} + +int JumpLatency() { + // Estimated max. + return 1 + AdduLatency(false) + Latency::BRANCH + 2; +} + +int AssertLatency() { return 1; } + +int MultiPushLatency() { + int latency = SubuLatency(false); + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { + latency++; + } + return latency; +} + +int MultiPushFPULatency() { + int latency = SubuLatency(false); + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { + latency += Sdc1Latency(); + } + return latency; +} + +int PushCallerSavedLatency(SaveFPRegsMode fp_mode) { + int latency = MultiPushLatency(); + if (fp_mode == SaveFPRegsMode::kSave) { + latency += MultiPushFPULatency(); + } + return latency; +} + +int MultiPopFPULatency() { + int latency = 0; + for (int16_t i = 0; i < kNumRegisters; i++) { + latency += Ldc1Latency(); + } + return latency++; +} + +int MultiPopLatency() { + int latency = 0; + for (int16_t i = 0; i < kNumRegisters; i++) { + latency++; + } + return latency++; +} + +int PopCallerSavedLatency(SaveFPRegsMode fp_mode) { + int latency = 0; + if (fp_mode == SaveFPRegsMode::kSave) { + latency += MultiPopFPULatency(); + } + return latency + MultiPopLatency(); +} + +int AssembleArchJumpLatency() { + // Estimated max. 
+ return Latency::BRANCH; +} + +int AssembleArchBinarySearchSwitchLatency(int cases) { + if (cases < CodeGenerator::kBinarySearchSwitchMinimalCases) { + return cases * (1 + Latency::BRANCH) + AssembleArchJumpLatency(); + } + return 1 + Latency::BRANCH + AssembleArchBinarySearchSwitchLatency(cases / 2); +} + +int GenerateSwitchTableLatency() { + int latency = 0; + if (kArchVariant >= kMips32r6) { + latency = LsaLatency() + 2; + } else { + latency = 6; + } + latency += 2; + return latency; +} + +int AssembleArchTableSwitchLatency() { + return Latency::BRANCH + GenerateSwitchTableLatency(); +} + +int AssembleReturnLatency() { + // Estimated max. + return AdduLatency(false) + MultiPopLatency() + MultiPopFPULatency() + + Latency::BRANCH + 1 + AdduLatency() + 8; +} + +int TryInlineTruncateDoubleToILatency() { + return 2 + Latency::TRUNC_W_D + Latency::MFC1 + 2 + AndLatency(false) + + Latency::BRANCH; +} + +int CallStubDelayedLatency() { return 1 + CallLatency(); } + +int TruncateDoubleToIDelayedLatency() { + // TODO(mips): This no longer reflects how TruncateDoubleToI is called. + return TryInlineTruncateDoubleToILatency() + 1 + SubuLatency(false) + + Sdc1Latency() + CallStubDelayedLatency() + AdduLatency(false) + 1; +} + +int CheckPageFlagLatency() { + return 2 * AndLatency(false) + 1 + Latency::BRANCH; +} + +int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { + // Basic latency modeling for MIPS32 instructions. They have been determined + // in an empirical way. + switch (instr->arch_opcode()) { + case kArchCallCodeObject: +#if V8_ENABLE_WEBASSEMBLY + case kArchCallWasmFunction: +#endif // V8_ENABLE_WEBASSEMBLY + return CallLatency(); + case kArchTailCallCodeObject: +#if V8_ENABLE_WEBASSEMBLY + case kArchTailCallWasm: +#endif // V8_ENABLE_WEBASSEMBLY + case kArchTailCallAddress: + return JumpLatency(); + case kArchCallJSFunction: { + int latency = 0; + if (FLAG_debug_code) { + latency = 1 + AssertLatency(); + } + return latency + 1 + AdduLatency(false) + CallLatency(); + } + case kArchPrepareCallCFunction: + return PrepareCallCFunctionLatency(); + case kArchSaveCallerRegisters: { + auto fp_mode = + static_cast(MiscField::decode(instr->opcode())); + return PushCallerSavedLatency(fp_mode); + } + case kArchRestoreCallerRegisters: { + auto fp_mode = + static_cast(MiscField::decode(instr->opcode())); + return PopCallerSavedLatency(fp_mode); + } + case kArchPrepareTailCall: + return 2; // Estimated max. + case kArchCallCFunction: + return CallCFunctionLatency(); + case kArchJmp: + return AssembleArchJumpLatency(); + case kArchBinarySearchSwitch: + return AssembleArchBinarySearchSwitchLatency((instr->InputCount() - 2) / + 2); + case kArchTableSwitch: + return AssembleArchTableSwitchLatency(); + case kArchAbortCSADcheck: + return CallLatency() + 1; + case kArchComment: + case kArchDeoptimize: + return 0; + case kArchRet: + return AssembleReturnLatency(); + case kArchTruncateDoubleToI: + return TruncateDoubleToIDelayedLatency(); + case kArchStoreWithWriteBarrier: + return AdduLatency() + 1 + CheckPageFlagLatency(); + case kArchStackSlot: { + // Estimated max. 
+ return AdduLatency(false) + AndLatency(false) + AssertLatency() + + AdduLatency(false) + AndLatency(false) + BranchShortLatency() + 1 + + SubuLatency() + AdduLatency(); + } + case kIeee754Float64Acos: + case kIeee754Float64Acosh: + case kIeee754Float64Asin: + case kIeee754Float64Asinh: + case kIeee754Float64Atan: + case kIeee754Float64Atanh: + case kIeee754Float64Atan2: + case kIeee754Float64Cos: + case kIeee754Float64Cosh: + case kIeee754Float64Cbrt: + case kIeee754Float64Exp: + case kIeee754Float64Expm1: + case kIeee754Float64Log: + case kIeee754Float64Log1p: + case kIeee754Float64Log10: + case kIeee754Float64Log2: + case kIeee754Float64Pow: + case kIeee754Float64Sin: + case kIeee754Float64Sinh: + case kIeee754Float64Tan: + case kIeee754Float64Tanh: + return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + + CallCFunctionLatency() + MovFromFloatResultLatency(); + case kMipsAdd: + return AdduLatency(instr->InputAt(1)->IsRegister()); + case kMipsAnd: + return AndLatency(instr->InputAt(1)->IsRegister()); + case kMipsOr: + return OrLatency(instr->InputAt(1)->IsRegister()); + case kMipsXor: + return XorLatency(instr->InputAt(1)->IsRegister()); + case kMipsSub: + return SubuLatency(instr->InputAt(1)->IsRegister()); + case kMipsNor: + return NorLatency(instr->InputAt(1)->IsRegister()); + case kMipsAddOvf: + return AddOverflowLatency(); + case kMipsSubOvf: + return SubOverflowLatency(); + case kMipsMul: + return MulLatency(false); + case kMipsMulHigh: + return MulhLatency(instr->InputAt(1)->IsRegister()); + case kMipsMulHighU: + return MulhuLatency(instr->InputAt(1)->IsRegister()); + case kMipsMulOvf: + return MulOverflowLatency(); + case kMipsMod: + return ModLatency(instr->InputAt(1)->IsRegister()); + case kMipsModU: + return ModuLatency(instr->InputAt(1)->IsRegister()); + case kMipsDiv: { + int latency = DivLatency(instr->InputAt(1)->IsRegister()); + if (IsMipsArchVariant(kMips32r6)) { + return latency++; + } else { + return latency + MovzLatency(); + } + } + case kMipsDivU: { + int latency = DivuLatency(instr->InputAt(1)->IsRegister()); + if (IsMipsArchVariant(kMips32r6)) { + return latency++; + } else { + return latency + MovzLatency(); + } + } + case kMipsClz: + return ClzLatency(); + case kMipsCtz: + return CtzLatency(); + case kMipsPopcnt: + return PopcntLatency(); + case kMipsShlPair: { + if (instr->InputAt(2)->IsRegister()) { + return ShlPairLatency(); + } else { + return ShlPairLatency(false); + } + } + case kMipsShrPair: { + if (instr->InputAt(2)->IsRegister()) { + return ShrPairLatency(); + } else { + // auto immediate_operand = ImmediateOperand::cast(instr->InputAt(2)); + // return ShrPairLatency(false, immediate_operand->inline_32_value()); + return 1; + } + } + case kMipsSarPair: { + if (instr->InputAt(2)->IsRegister()) { + return SarPairLatency(); + } else { + return SarPairLatency(false); + } + } + case kMipsExt: + return ExtLatency(); + case kMipsIns: + return InsLatency(); + case kMipsRor: + return RorLatency(instr->InputAt(1)->IsRegister()); + case kMipsLsa: + return LsaLatency(); + case kMipsModD: + return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + + CallCFunctionLatency() + MovFromFloatResultLatency(); + case kMipsAddPair: + return AddPairLatency(); + case kMipsSubPair: + return SubPairLatency(); + case kMipsMulPair: + return MulPairLatency(); + case kMipsMaddS: + return MaddSLatency(); + case kMipsMaddD: + return MaddDLatency(); + case kMipsMsubS: + return MsubSLatency(); + case kMipsMsubD: + return MsubDLatency(); + case kMipsNegS: + 
return Neg_sLatency(); + case kMipsNegD: + return Neg_dLatency(); + case kMipsFloat64RoundDown: + case kMipsFloat64RoundTruncate: + case kMipsFloat64RoundUp: + case kMipsFloat64RoundTiesEven: + return Float64RoundLatency(); + case kMipsFloat32RoundDown: + case kMipsFloat32RoundTruncate: + case kMipsFloat32RoundUp: + case kMipsFloat32RoundTiesEven: + return Float32RoundLatency(); + case kMipsFloat32Max: + return Float32MaxLatency(); + case kMipsFloat64Max: + return Float64MaxLatency(); + case kMipsFloat32Min: + return Float32MinLatency(); + case kMipsFloat64Min: + return Float64MinLatency(); + case kMipsCvtSUw: + return CvtSUwLatency(); + case kMipsCvtDUw: + return CvtDUwLatency(); + case kMipsFloorWD: + return FloorWDLatency(); + case kMipsCeilWD: + return CeilWDLatency(); + case kMipsRoundWD: + return RoundWDLatency(); + case kMipsTruncWD: + return Trunc_w_dLatency() + Latency::MFC1; + case kMipsTruncWS: + return Latency::TRUNC_W_S + Latency::MFC1 + AdduLatency(false) + + SltLatency() + MovnLatency(); + case kMipsTruncUwD: + return Trunc_uw_dLatency(); + case kMipsTruncUwS: + return Trunc_uw_sLatency() + AdduLatency(false) + MovzLatency(); + case kMipsFloat64ExtractLowWord32: + return Latency::MFC1; + case kMipsFloat64ExtractHighWord32: + return Mfhc1Latency(); + case kMipsFloat64InsertLowWord32: { + if (IsFp32Mode()) { + return Latency::MTC1; + } else { + return Latency::MFHC1 + Latency::MTC1 + Latency::MTHC1; + } + } + case kMipsFloat64InsertHighWord32: + return Mthc1Latency(); + case kMipsFloat64SilenceNaN: + return Latency::SUB_D; + case kMipsSeb: + return SebLatency(); + case kMipsSeh: + return SehLatency(); + case kMipsUlhu: + return UlhuLatency(); + case kMipsUlh: + return UlhLatency(); + case kMipsUsh: + return UshLatency(); + case kMipsUlw: + return UlwLatency(); + case kMipsUsw: + return UswLatency(); + case kMipsUlwc1: + return Ulwc1Latency(); + case kMipsSwc1: + return MoveLatency(false) + Latency::SWC1; // Estimated max. + case kMipsUswc1: + return MoveLatency(false) + Uswc1Latency(); // Estimated max. + case kMipsLdc1: + return Ldc1Latency(); + case kMipsUldc1: + return Uldc1Latency(); + case kMipsSdc1: + return MoveLatency(false) + Sdc1Latency(); // Estimated max. + case kMipsUsdc1: + return MoveLatency(false) + Usdc1Latency(); // Estimated max. + case kMipsPush: { + if (instr->InputAt(0)->IsFPRegister()) { + auto op = LocationOperand::cast(instr->InputAt(0)); + switch (op->representation()) { + case MachineRepresentation::kFloat32: + return Latency::SWC1 + SubuLatency(false); + case MachineRepresentation::kFloat64: + return Sdc1Latency() + SubuLatency(false); + default: { + UNREACHABLE(); + } + } + } else { + return PushRegisterLatency(); + } + } + case kMipsPeek: { + if (instr->OutputAt(0)->IsFPRegister()) { + auto op = LocationOperand::cast(instr->OutputAt(0)); + if (op->representation() == MachineRepresentation::kFloat64) { + return Ldc1Latency(); + } else { + return Latency::LWC1; + } + } else { + return 1; + } + } + case kMipsStackClaim: + return SubuLatency(false); + case kMipsStoreToStackSlot: { + if (instr->InputAt(0)->IsFPRegister()) { + auto op = LocationOperand::cast(instr->InputAt(0)); + if (op->representation() == MachineRepresentation::kFloat64) { + return Sdc1Latency(); + } else if (op->representation() == MachineRepresentation::kFloat32) { + return Latency::SWC1; + } else { + return 1; // Estimated value. 
+ } + } else { + return 1; + } + } + case kMipsByteSwap32: + return ByteSwapSignedLatency(); + case kAtomicLoadInt8: + case kAtomicLoadUint8: + case kAtomicLoadInt16: + case kAtomicLoadUint16: + case kAtomicLoadWord32: + return 2; + case kAtomicStoreWord8: + case kAtomicStoreWord16: + case kAtomicStoreWord32: + return 3; + case kAtomicExchangeInt8: + return Word32AtomicExchangeLatency(true, 8); + case kAtomicExchangeUint8: + return Word32AtomicExchangeLatency(false, 8); + case kAtomicExchangeInt16: + return Word32AtomicExchangeLatency(true, 16); + case kAtomicExchangeUint16: + return Word32AtomicExchangeLatency(false, 16); + case kAtomicExchangeWord32: { + return 1 + AdduLatency() + Ldc1Latency() + 1 + ScLatency(0) + + BranchShortLatency() + 1; + } + case kAtomicCompareExchangeInt8: + return Word32AtomicCompareExchangeLatency(true, 8); + case kAtomicCompareExchangeUint8: + return Word32AtomicCompareExchangeLatency(false, 8); + case kAtomicCompareExchangeInt16: + return Word32AtomicCompareExchangeLatency(true, 16); + case kAtomicCompareExchangeUint16: + return Word32AtomicCompareExchangeLatency(false, 16); + case kAtomicCompareExchangeWord32: + return AdduLatency() + 1 + LlLatency(0) + BranchShortLatency() + 1; + case kMipsTst: + return AndLatency(instr->InputAt(1)->IsRegister()); + case kMipsCmpS: + return MoveLatency() + CompareF32Latency(); + case kMipsCmpD: + return MoveLatency() + CompareF64Latency(); + case kArchNop: + case kArchThrowTerminator: + case kMipsCmp: + return 0; + case kArchDebugBreak: + case kArchFramePointer: + case kArchParentFramePointer: + case kMipsShl: + case kMipsShr: + case kMipsSar: + case kMipsMov: + case kMipsMaxS: + case kMipsMinS: + case kMipsMaxD: + case kMipsMinD: + case kMipsLbu: + case kMipsLb: + case kMipsSb: + case kMipsLhu: + case kMipsLh: + case kMipsSh: + case kMipsLw: + case kMipsSw: + case kMipsLwc1: + return 1; + case kMipsAddS: + return Latency::ADD_S; + case kMipsSubS: + return Latency::SUB_S; + case kMipsMulS: + return Latency::MUL_S; + case kMipsAbsS: + return Latency::ABS_S; + case kMipsAddD: + return Latency::ADD_D; + case kMipsSubD: + return Latency::SUB_D; + case kMipsAbsD: + return Latency::ABS_D; + case kMipsCvtSD: + return Latency::CVT_S_D; + case kMipsCvtDS: + return Latency::CVT_D_S; + case kMipsMulD: + return Latency::MUL_D; + case kMipsFloorWS: + return Latency::FLOOR_W_S; + case kMipsCeilWS: + return Latency::CEIL_W_S; + case kMipsRoundWS: + return Latency::ROUND_W_S; + case kMipsCvtDW: + return Latency::CVT_D_W; + case kMipsCvtSW: + return Latency::CVT_S_W; + case kMipsDivS: + return Latency::DIV_S; + case kMipsSqrtS: + return Latency::SQRT_S; + case kMipsDivD: + return Latency::DIV_D; + case kMipsSqrtD: + return Latency::SQRT_D; + default: + return 1; + } +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc new file mode 100644 index 00000000000000..67a28630a3d6d4 --- /dev/null +++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc @@ -0,0 +1,2566 @@ +// Copyright 2014 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
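// Overview (editorial sketch, not part of the upstream file): this selector
// walks TurboFan IR nodes and emits MIPS Instruction objects. The typical
// pattern, used throughout the Visit* methods below, is:
//
//   MipsOperandGenerator g(selector);
//   selector->Emit(kMipsOpcode, g.DefineAsRegister(node),       // output
//                  g.UseRegister(node->InputAt(0)),             // inputs
//                  g.UseOperand(node->InputAt(1), kMipsOpcode));
//
// where kMipsOpcode stands for any concrete opcode and UseOperand() picks an
// immediate form when CanBeImmediate() accepts the constant for that opcode,
// and a register otherwise.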
+ +#include "src/base/bits.h" +#include "src/compiler/backend/instruction-selector-impl.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#define TRACE_UNIMPL() \ + PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__) + +#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__) + +// Adds Mips-specific methods for generating InstructionOperands. +class MipsOperandGenerator final : public OperandGenerator { + public: + explicit MipsOperandGenerator(InstructionSelector* selector) + : OperandGenerator(selector) {} + + InstructionOperand UseOperand(Node* node, InstructionCode opcode) { + if (CanBeImmediate(node, opcode)) { + return UseImmediate(node); + } + return UseRegister(node); + } + + // Use the zero register if the node has the immediate value zero, otherwise + // assign a register. + InstructionOperand UseRegisterOrImmediateZero(Node* node) { + if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) || + (IsFloatConstant(node) && + (bit_cast(GetFloatConstantValue(node)) == 0))) { + return UseImmediate(node); + } + return UseRegister(node); + } + + bool IsIntegerConstant(Node* node) { + return (node->opcode() == IrOpcode::kInt32Constant); + } + + int64_t GetIntegerConstantValue(Node* node) { + DCHECK_EQ(IrOpcode::kInt32Constant, node->opcode()); + return OpParameter(node->op()); + } + + bool IsFloatConstant(Node* node) { + return (node->opcode() == IrOpcode::kFloat32Constant) || + (node->opcode() == IrOpcode::kFloat64Constant); + } + + double GetFloatConstantValue(Node* node) { + if (node->opcode() == IrOpcode::kFloat32Constant) { + return OpParameter(node->op()); + } + DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode()); + return OpParameter(node->op()); + } + + bool CanBeImmediate(Node* node, InstructionCode opcode) { + Int32Matcher m(node); + if (!m.HasResolvedValue()) return false; + int32_t value = m.ResolvedValue(); + switch (ArchOpcodeField::decode(opcode)) { + case kMipsShl: + case kMipsSar: + case kMipsShr: + return is_uint5(value); + case kMipsAdd: + case kMipsAnd: + case kMipsOr: + case kMipsTst: + case kMipsSub: + case kMipsXor: + return is_uint16(value); + case kMipsLb: + case kMipsLbu: + case kMipsSb: + case kMipsLh: + case kMipsLhu: + case kMipsSh: + case kMipsLw: + case kMipsSw: + case kMipsLwc1: + case kMipsSwc1: + case kMipsLdc1: + case kMipsSdc1: + // true even for 32b values, offsets > 16b + // are handled in assembler-mips.cc + return is_int32(value); + default: + return is_int16(value); + } + } + + private: + bool ImmediateFitsAddrMode1Instruction(int32_t imm) const { + TRACE_UNIMPL(); + return false; + } +}; + +static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + MipsOperandGenerator g(selector); + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), + g.UseRegister(node->InputAt(1))); +} + +static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + MipsOperandGenerator g(selector); + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1))); +} + +void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { + MipsOperandGenerator g(selector); + selector->Emit( + opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), + g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); +} + +static void 
VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  MipsOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  MipsOperandGenerator g(selector);
+  int32_t imm = OpParameter<int32_t>(node->op());
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
+                      Node* node) {
+  MipsOperandGenerator g(selector);
+  int32_t imm = OpParameter<int32_t>(node->op());
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  MipsOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseOperand(node->InputAt(1), opcode));
+}
+
+bool TryMatchImmediate(InstructionSelector* selector,
+                       InstructionCode* opcode_return, Node* node,
+                       size_t* input_count_return, InstructionOperand* inputs) {
+  MipsOperandGenerator g(selector);
+  if (g.CanBeImmediate(node, *opcode_return)) {
+    *opcode_return |= AddressingModeField::encode(kMode_MRI);
+    inputs[0] = g.UseImmediate(node);
+    *input_count_return = 1;
+    return true;
+  }
+  return false;
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, bool has_reverse_opcode,
+                       InstructionCode reverse_opcode,
+                       FlagsContinuation* cont) {
+  MipsOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand inputs[2];
+  size_t input_count = 0;
+  InstructionOperand outputs[1];
+  size_t output_count = 0;
+
+  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
+                        &inputs[1])) {
+    inputs[0] = g.UseRegister(m.left().node());
+    input_count++;
+  } else if (has_reverse_opcode &&
+             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+                               &input_count, &inputs[1])) {
+    inputs[0] = g.UseRegister(m.right().node());
+    opcode = reverse_opcode;
+    input_count++;
+  } else {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+  }
+
+  if (cont->IsDeoptimize()) {
+    // If we can deoptimize as a result of the binop, we need to make sure that
+    // the deopt inputs are not overwritten by the binop result. One way
+    // to achieve that is to declare the output register as same-as-first.
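// (Editorial elaboration, not from the upstream source: DefineSameAsFirst()
// constrains the result to reuse the first input's register, so the only
// value the binop clobbers is its own left operand; EmitWithContinuation()
// then attaches the deoptimization inputs to the same instruction.)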
+ outputs[output_count++] = g.DefineSameAsFirst(node); + } else { + outputs[output_count++] = g.DefineAsRegister(node); + } + + DCHECK_NE(0u, input_count); + DCHECK_EQ(1u, output_count); + DCHECK_GE(arraysize(inputs), input_count); + DCHECK_GE(arraysize(outputs), output_count); + + selector->EmitWithContinuation(opcode, output_count, outputs, input_count, + inputs, cont); +} + +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode, bool has_reverse_opcode, + InstructionCode reverse_opcode) { + FlagsContinuation cont; + VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont); +} + +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode, FlagsContinuation* cont) { + VisitBinop(selector, node, opcode, false, kArchNop, cont); +} + +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode) { + VisitBinop(selector, node, opcode, false, kArchNop); +} + +static void VisitPairAtomicBinop(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + MipsOperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + Node* value_high = node->InputAt(3); + AddressingMode addressing_mode = kMode_None; + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); + InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index), + g.UseFixed(value, a1), + g.UseFixed(value_high, a2)}; + InstructionOperand outputs[2]; + size_t output_count = 0; + InstructionOperand temps[3]; + size_t temp_count = 0; + temps[temp_count++] = g.TempRegister(a0); + + Node* projection0 = NodeProperties::FindProjection(node, 0); + Node* projection1 = NodeProperties::FindProjection(node, 1); + if (projection0) { + outputs[output_count++] = g.DefineAsFixed(projection0, v0); + } else { + temps[temp_count++] = g.TempRegister(v0); + } + if (projection1) { + outputs[output_count++] = g.DefineAsFixed(projection1, v1); + } else { + temps[temp_count++] = g.TempRegister(v1); + } + selector->Emit(code, output_count, outputs, arraysize(inputs), inputs, + temp_count, temps); +} + +void InstructionSelector::VisitStackSlot(Node* node) { + StackSlotRepresentation rep = StackSlotRepresentationOf(node->op()); + int alignment = rep.alignment(); + int slot = frame_->AllocateSpillSlot(rep.size(), alignment); + OperandGenerator g(this); + + Emit(kArchStackSlot, g.DefineAsRegister(node), + sequence()->AddImmediate(Constant(slot)), 0, nullptr); +} + +void InstructionSelector::VisitAbortCSADcheck(Node* node) { + MipsOperandGenerator g(this); + Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); +} + +void InstructionSelector::VisitLoadTransform(Node* node) { + LoadTransformParameters params = LoadTransformParametersOf(node->op()); + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + InstructionCode opcode = kArchNop; + switch (params.transformation) { + case LoadTransformation::kS128Load8Splat: + opcode = kMipsS128Load8Splat; + break; + case LoadTransformation::kS128Load16Splat: + opcode = kMipsS128Load16Splat; + break; + case LoadTransformation::kS128Load32Splat: + opcode = kMipsS128Load32Splat; + break; + case LoadTransformation::kS128Load64Splat: + opcode = kMipsS128Load64Splat; + break; + case LoadTransformation::kS128Load8x8S: + opcode = kMipsS128Load8x8S; + break; + case LoadTransformation::kS128Load8x8U: + opcode = kMipsS128Load8x8U; + break; + 
case LoadTransformation::kS128Load16x4S: + opcode = kMipsS128Load16x4S; + break; + case LoadTransformation::kS128Load16x4U: + opcode = kMipsS128Load16x4U; + break; + case LoadTransformation::kS128Load32x2S: + opcode = kMipsS128Load32x2S; + break; + case LoadTransformation::kS128Load32x2U: + opcode = kMipsS128Load32x2U; + break; + default: + UNIMPLEMENTED(); + } + + if (g.CanBeImmediate(index, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, + g.UseRegister(index), g.UseRegister(base)); + // Emit desired load opcode, using temp addr_reg. + Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); + } +} + +void InstructionSelector::VisitLoad(Node* node) { + LoadRepresentation load_rep = LoadRepresentationOf(node->op()); + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + InstructionCode opcode = kArchNop; + switch (load_rep.representation()) { + case MachineRepresentation::kFloat32: + opcode = kMipsLwc1; + break; + case MachineRepresentation::kFloat64: + opcode = kMipsLdc1; + break; + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kWord8: + opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb; + break; + case MachineRepresentation::kWord16: + opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh; + break; + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: // Fall through. + case MachineRepresentation::kWord32: + opcode = kMipsLw; + break; + case MachineRepresentation::kSimd128: + opcode = kMipsMsaLd; + break; + case MachineRepresentation::kCompressedPointer: // Fall through. + case MachineRepresentation::kCompressed: // Fall through. + case MachineRepresentation::kSandboxedPointer: // Fall through. + case MachineRepresentation::kWord64: // Fall through. + case MachineRepresentation::kMapWord: // Fall through. + case MachineRepresentation::kNone: + UNREACHABLE(); + } + + if (g.CanBeImmediate(index, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, + g.UseRegister(index), g.UseRegister(base)); + // Emit desired load opcode, using temp addr_reg. + Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); + } +} + +void InstructionSelector::VisitProtectedLoad(Node* node) { + // TODO(eholk) + UNIMPLEMENTED(); +} + +void InstructionSelector::VisitStore(Node* node) { + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + StoreRepresentation store_rep = StoreRepresentationOf(node->op()); + WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); + MachineRepresentation rep = store_rep.representation(); + + if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) { + write_barrier_kind = kFullWriteBarrier; + } + + // TODO(mips): I guess this could be done in a better way. 
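// Note (editorial, not from the upstream source): the branch below picks one
// of two store lowerings. When the stored value is a tagged pointer that may
// need a write barrier, the selector emits a single kArchStoreWithWriteBarrier
// instruction with all operands in unique registers (plus two temps) so the
// out-of-line record-write stub can use them freely. Otherwise it falls
// through to a plain machine store (kMipsSw, kMipsSdc1, ...) chosen from the
// MachineRepresentation.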
+  if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+    DCHECK(CanBeTaggedPointer(rep));
+    InstructionOperand inputs[3];
+    size_t input_count = 0;
+    inputs[input_count++] = g.UseUniqueRegister(base);
+    inputs[input_count++] = g.UseUniqueRegister(index);
+    inputs[input_count++] = g.UseUniqueRegister(value);
+    RecordWriteMode record_write_mode =
+        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+    size_t const temp_count = arraysize(temps);
+    InstructionCode code = kArchStoreWithWriteBarrier;
+    code |= MiscField::encode(static_cast<int>(record_write_mode));
+    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+  } else {
+    ArchOpcode opcode;
+    switch (rep) {
+      case MachineRepresentation::kFloat32:
+        opcode = kMipsSwc1;
+        break;
+      case MachineRepresentation::kFloat64:
+        opcode = kMipsSdc1;
+        break;
+      case MachineRepresentation::kBit:  // Fall through.
+      case MachineRepresentation::kWord8:
+        opcode = kMipsSb;
+        break;
+      case MachineRepresentation::kWord16:
+        opcode = kMipsSh;
+        break;
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
+      case MachineRepresentation::kTagged:         // Fall through.
+      case MachineRepresentation::kWord32:
+        opcode = kMipsSw;
+        break;
+      case MachineRepresentation::kSimd128:
+        opcode = kMipsMsaSt;
+        break;
+      case MachineRepresentation::kCompressedPointer:  // Fall through.
+      case MachineRepresentation::kCompressed:         // Fall through.
+      case MachineRepresentation::kSandboxedPointer:   // Fall through.
+      case MachineRepresentation::kWord64:             // Fall through.
+      case MachineRepresentation::kMapWord:            // Fall through.
+      case MachineRepresentation::kNone:
+        UNREACHABLE();
+    }
+
+    if (g.CanBeImmediate(index, opcode)) {
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+           g.UseRegister(base), g.UseImmediate(index),
+           g.UseRegisterOrImmediateZero(value));
+    } else {
+      InstructionOperand addr_reg = g.TempRegister();
+      Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+           g.UseRegister(index), g.UseRegister(base));
+      // Emit desired store opcode, using temp addr_reg.
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
+    }
+  }
+}
+
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+      m.right().HasResolvedValue()) {
+    uint32_t mask = m.right().ResolvedValue();
+    uint32_t mask_width = base::bits::CountPopulation(mask);
+    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+      // The mask must be contiguous, and occupy the least-significant bits.
+      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
+      // significant bits.
+      Int32BinopMatcher mleft(m.left().node());
+      if (mleft.right().HasResolvedValue()) {
+        // Any shift value can match; int32 shifts use `value % 32`.
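// Worked example (editorial, not from the upstream source): for
//   Word32And(Word32Shr(x, 8), 0xFF)
// we get lsb = 8 and mask_width = 8, so the selector emits a single
//   ext rd, rs, 8, 8      // rd = (rs >> 8) & 0xFF
// instead of a shift followed by a mask.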
+        uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
+
+        // Ext cannot extract bits past the register size; however, since
+        // shifting the original value would have introduced some zeros we can
+        // still use Ext with a smaller mask and the remaining bits will be
+        // zeros.
+        if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+        if (lsb == 0 && mask_width == 32) {
+          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+        } else {
+          Emit(kMipsExt, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+               g.TempImmediate(mask_width));
+        }
+        return;
+      }
+      // Other cases fall through to the normal And operation.
+    }
+  }
+  if (m.right().HasResolvedValue()) {
+    uint32_t mask = m.right().ResolvedValue();
+    uint32_t shift = base::bits::CountPopulation(~mask);
+    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+    if (shift != 0 && shift != 32 && msb + shift == 32) {
+      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+      // and remove constant loading of inverted mask.
+      Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(0), g.TempImmediate(shift));
+      return;
+    }
+  }
+  VisitBinop(this, node, kMipsAnd, true, kMipsAnd);
+}
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kMipsOr, true, kMipsOr);
+}
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+      m.right().Is(-1)) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (!mleft.right().HasResolvedValue()) {
+      MipsOperandGenerator g(this);
+      Emit(kMipsNor, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()));
+      return;
+    }
+  }
+  if (m.right().Is(-1)) {
+    // Use Nor for bit negation and eliminate constant loading for xori.
+    MipsOperandGenerator g(this);
+    Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+         g.TempImmediate(0));
+    return;
+  }
+  VisitBinop(this, node, kMipsXor, true, kMipsXor);
+}
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+      m.right().IsInRange(1, 31)) {
+    MipsOperandGenerator g(this);
+    Int32BinopMatcher mleft(m.left().node());
+    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
+    // contiguous, and the shift immediate non-zero.
+    if (mleft.right().HasResolvedValue()) {
+      uint32_t mask = mleft.right().ResolvedValue();
+      uint32_t mask_width = base::bits::CountPopulation(mask);
+      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+        uint32_t shift = m.right().ResolvedValue();
+        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+        DCHECK_NE(0u, shift);
+        if ((shift + mask_width) >= 32) {
+          // If the mask is contiguous and reaches or extends beyond the top
+          // bit, only the shift is needed.
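// Worked example (editorial, not from the upstream source): for
//   Word32Shl(Word32And(x, 0x00FFFFFF), 8)
// the masked-off top byte is shifted out anyway, so a single
//   sll rd, rs, 8
// is equivalent and the And is dropped. Similarly, the Ins match in
// VisitWord32And above turns
//   Word32And(x, 0xFFFFFF00)   // ~(2^8 - 1)
// into an insert of zeros at position 0 of size 8 (effectively an ins from
// the zero register), clearing the low 8 bits without materializing the mask.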
+ Emit(kMipsShl, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node()), + g.UseImmediate(m.right().node())); + return; + } + } + } + } + VisitRRO(this, kMipsShl, node); +} + +void InstructionSelector::VisitWord32Shr(Node* node) { + Int32BinopMatcher m(node); + if (m.left().IsWord32And() && m.right().HasResolvedValue()) { + uint32_t lsb = m.right().ResolvedValue() & 0x1F; + Int32BinopMatcher mleft(m.left().node()); + if (mleft.right().HasResolvedValue() && + mleft.right().ResolvedValue() != 0) { + // Select Ext for Shr(And(x, mask), imm) where the result of the mask is + // shifted into the least-significant bits. + uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb; + unsigned mask_width = base::bits::CountPopulation(mask); + unsigned mask_msb = base::bits::CountLeadingZeros32(mask); + if ((mask_msb + mask_width + lsb) == 32) { + MipsOperandGenerator g(this); + DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask)); + Emit(kMipsExt, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), + g.TempImmediate(mask_width)); + return; + } + } + } + VisitRRO(this, kMipsShr, node); +} + +void InstructionSelector::VisitWord32Sar(Node* node) { + Int32BinopMatcher m(node); + if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + m.left().IsWord32Shl() && CanCover(node, m.left().node())) { + Int32BinopMatcher mleft(m.left().node()); + if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) { + MipsOperandGenerator g(this); + uint32_t sar = m.right().ResolvedValue(); + uint32_t shl = mleft.right().ResolvedValue(); + if ((sar == shl) && (sar == 16)) { + Emit(kMipsSeh, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node())); + return; + } else if ((sar == shl) && (sar == 24)) { + Emit(kMipsSeb, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node())); + return; + } + } + } + VisitRRO(this, kMipsSar, node); +} + +static void VisitInt32PairBinop(InstructionSelector* selector, + InstructionCode pair_opcode, + InstructionCode single_opcode, Node* node) { + MipsOperandGenerator g(selector); + + Node* projection1 = NodeProperties::FindProjection(node, 1); + + if (projection1) { + // We use UseUniqueRegister here to avoid register sharing with the output + // register. + InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1)), + g.UseUniqueRegister(node->InputAt(2)), + g.UseUniqueRegister(node->InputAt(3))}; + + InstructionOperand outputs[] = { + g.DefineAsRegister(node), + g.DefineAsRegister(NodeProperties::FindProjection(node, 1))}; + selector->Emit(pair_opcode, 2, outputs, 4, inputs); + } else { + // The high word of the result is not used, so we emit the standard 32 bit + // instruction. + selector->Emit(single_opcode, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0)), + g.UseRegister(node->InputAt(2))); + } +} + +void InstructionSelector::VisitInt32PairAdd(Node* node) { + VisitInt32PairBinop(this, kMipsAddPair, kMipsAdd, node); +} + +void InstructionSelector::VisitInt32PairSub(Node* node) { + VisitInt32PairBinop(this, kMipsSubPair, kMipsSub, node); +} + +void InstructionSelector::VisitInt32PairMul(Node* node) { + VisitInt32PairBinop(this, kMipsMulPair, kMipsMul, node); +} + +// Shared routine for multiple shift operations. 
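// Note (editorial, not from the upstream source): 64-bit shifts on 32-bit
// MIPS operate on a (low, high) register pair, so Word32PairShl/Shr/Sar take
// three inputs: the low word, the high word, and the shift amount (an
// immediate when the matcher resolves a constant, otherwise a unique
// register). If the IR never uses the high half of the result (no
// Projection(1)), the routine below defines only one output and routes the
// other half into a scratch temp.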
+static void VisitWord32PairShift(InstructionSelector* selector, + InstructionCode opcode, Node* node) { + MipsOperandGenerator g(selector); + Int32Matcher m(node->InputAt(2)); + InstructionOperand shift_operand; + if (m.HasResolvedValue()) { + shift_operand = g.UseImmediate(m.node()); + } else { + shift_operand = g.UseUniqueRegister(m.node()); + } + + // We use UseUniqueRegister here to avoid register sharing with the output + // register. + InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1)), + shift_operand}; + + Node* projection1 = NodeProperties::FindProjection(node, 1); + + InstructionOperand outputs[2]; + InstructionOperand temps[1]; + int32_t output_count = 0; + int32_t temp_count = 0; + + outputs[output_count++] = g.DefineAsRegister(node); + if (projection1) { + outputs[output_count++] = g.DefineAsRegister(projection1); + } else { + temps[temp_count++] = g.TempRegister(); + } + + selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps); +} + +void InstructionSelector::VisitWord32PairShl(Node* node) { + VisitWord32PairShift(this, kMipsShlPair, node); +} + +void InstructionSelector::VisitWord32PairShr(Node* node) { + VisitWord32PairShift(this, kMipsShrPair, node); +} + +void InstructionSelector::VisitWord32PairSar(Node* node) { + VisitWord32PairShift(this, kMipsSarPair, node); +} + +void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); } + +void InstructionSelector::VisitWord32Ror(Node* node) { + VisitRRO(this, kMipsRor, node); +} + +void InstructionSelector::VisitWord32Clz(Node* node) { + VisitRR(this, kMipsClz, node); +} + +void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) { + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + ArchOpcode opcode = kMipsWord32AtomicPairLoad; + AddressingMode addressing_mode = kMode_MRI; + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); + InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)}; + InstructionOperand temps[3]; + size_t temp_count = 0; + temps[temp_count++] = g.TempRegister(a0); + InstructionOperand outputs[2]; + size_t output_count = 0; + + Node* projection0 = NodeProperties::FindProjection(node, 0); + Node* projection1 = NodeProperties::FindProjection(node, 1); + if (projection0) { + outputs[output_count++] = g.DefineAsFixed(projection0, v0); + } else { + temps[temp_count++] = g.TempRegister(v0); + } + if (projection1) { + outputs[output_count++] = g.DefineAsFixed(projection1, v1); + } else { + temps[temp_count++] = g.TempRegister(v1); + } + Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count, + temps); +} + +void InstructionSelector::VisitWord32AtomicPairStore(Node* node) { + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value_low = node->InputAt(2); + Node* value_high = node->InputAt(3); + + InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index), + g.UseFixed(value_low, a1), + g.UseFixed(value_high, a2)}; + InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(), + g.TempRegister()}; + Emit(kMipsWord32AtomicPairStore | AddressingModeField::encode(kMode_MRI), 0, + nullptr, arraysize(inputs), inputs, arraysize(temps), temps); +} + +void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) { + VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAdd); +} + +void InstructionSelector::VisitWord32AtomicPairSub(Node* node) { + 
VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairSub); +} + +void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) { + VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAnd); +} + +void InstructionSelector::VisitWord32AtomicPairOr(Node* node) { + VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairOr); +} + +void InstructionSelector::VisitWord32AtomicPairXor(Node* node) { + VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairXor); +} + +void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) { + VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairExchange); +} + +void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { + MipsOperandGenerator g(this); + InstructionOperand inputs[] = { + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), + g.UseFixed(node->InputAt(2), a1), g.UseFixed(node->InputAt(3), a2), + g.UseFixed(node->InputAt(4), a3), g.UseUniqueRegister(node->InputAt(5))}; + + InstructionCode code = kMipsWord32AtomicPairCompareExchange | + AddressingModeField::encode(kMode_MRI); + Node* projection0 = NodeProperties::FindProjection(node, 0); + Node* projection1 = NodeProperties::FindProjection(node, 1); + InstructionOperand outputs[2]; + size_t output_count = 0; + InstructionOperand temps[3]; + size_t temp_count = 0; + temps[temp_count++] = g.TempRegister(a0); + if (projection0) { + outputs[output_count++] = g.DefineAsFixed(projection0, v0); + } else { + temps[temp_count++] = g.TempRegister(v0); + } + if (projection1) { + outputs[output_count++] = g.DefineAsFixed(projection1, v1); + } else { + temps[temp_count++] = g.TempRegister(v1); + } + Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count, + temps); +} + +void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); } + +void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); } + +void InstructionSelector::VisitWord32ReverseBytes(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsByteSwap32, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + +void InstructionSelector::VisitWord32Ctz(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitWord32Popcnt(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitInt32Add(Node* node) { + MipsOperandGenerator g(this); + Int32BinopMatcher m(node); + + if (IsMipsArchVariant(kMips32r6)) { + // Select Lsa for (left + (left_of_right << imm)). + if (m.right().opcode() == IrOpcode::kWord32Shl && + CanCover(node, m.left().node()) && CanCover(node, m.right().node())) { + Int32BinopMatcher mright(m.right().node()); + if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) { + int32_t shift_value = + static_cast(mright.right().ResolvedValue()); + if (shift_value > 0 && shift_value <= 31) { + Emit(kMipsLsa, g.DefineAsRegister(node), + g.UseRegister(m.left().node()), + g.UseRegister(mright.left().node()), + g.TempImmediate(shift_value)); + return; + } + } + } + + // Select Lsa for ((left_of_left << imm) + right). 
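// Worked example (editorial, not from the upstream source): an index
// computation such as
//   Int32Add(base, Word32Shl(index, 2))
// is covered by the two Lsa patterns around this comment and becomes a single
// Lsa (shift-and-add) macro instruction computing rd = base + (index << 2),
// instead of a separate sll + addu pair. This only fires on MIPS32r6, and
// only when the non-shifted addend is not itself a constant (constants go
// through other folds); for larger shift amounts the macro-assembler
// typically expands Lsa back into sll + addu.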
+ if (m.left().opcode() == IrOpcode::kWord32Shl && + CanCover(node, m.right().node()) && CanCover(node, m.left().node())) { + Int32BinopMatcher mleft(m.left().node()); + if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) { + int32_t shift_value = + static_cast<int32_t>(mleft.right().ResolvedValue()); + if (shift_value > 0 && shift_value <= 31) { + Emit(kMipsLsa, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), + g.UseRegister(mleft.left().node()), + g.TempImmediate(shift_value)); + return; + } + } + } + } + + VisitBinop(this, node, kMipsAdd, true, kMipsAdd); +} + +void InstructionSelector::VisitInt32Sub(Node* node) { + VisitBinop(this, node, kMipsSub); +} + +void InstructionSelector::VisitInt32Mul(Node* node) { + MipsOperandGenerator g(this); + Int32BinopMatcher m(node); + if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) { + uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue()); + if (base::bits::IsPowerOfTwo(value)) { + Emit(kMipsShl | AddressingModeField::encode(kMode_None), + g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.TempImmediate(base::bits::WhichPowerOfTwo(value))); + return; + } + if (base::bits::IsPowerOfTwo(value - 1) && IsMipsArchVariant(kMips32r6) && + value - 1 > 0 && value - 1 <= 31) { + Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.left().node()), + g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1))); + return; + } + if (base::bits::IsPowerOfTwo(value + 1)) { + InstructionOperand temp = g.TempRegister(); + Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp, + g.UseRegister(m.left().node()), + g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1))); + Emit(kMipsSub | AddressingModeField::encode(kMode_None), + g.DefineAsRegister(node), temp, g.UseRegister(m.left().node())); + return; + } + } + VisitRRR(this, kMipsMul, node); +} + +void InstructionSelector::VisitInt32MulHigh(Node* node) { + VisitRRR(this, kMipsMulHigh, node); +} + +void InstructionSelector::VisitUint32MulHigh(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), + g.UseRegister(node->InputAt(1))); +} + +void InstructionSelector::VisitInt32Div(Node* node) { + MipsOperandGenerator g(this); + Int32BinopMatcher m(node); + Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitUint32Div(Node* node) { + MipsOperandGenerator g(this); + Int32BinopMatcher m(node); + Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitInt32Mod(Node* node) { + MipsOperandGenerator g(this); + Int32BinopMatcher m(node); + Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitUint32Mod(Node* node) { + MipsOperandGenerator g(this); + Int32BinopMatcher m(node); + Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) { + VisitRR(this, kMipsCvtDS, node); +} + +void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) { + VisitRR(this, kMipsCvtSW, node); +} + +void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) { + VisitRR(this, kMipsCvtSUw, node); +} + +void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { + VisitRR(this, 
kMipsCvtDW, node); +} + +void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { + VisitRR(this, kMipsCvtDUw, node); +} + +void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) { + MipsOperandGenerator g(this); + InstructionCode opcode = kMipsTruncWS; + TruncateKind kind = OpParameter<TruncateKind>(node->op()); + if (kind == TruncateKind::kSetOverflowToMin) { + opcode |= MiscField::encode(true); + } + + Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) { + MipsOperandGenerator g(this); + InstructionCode opcode = kMipsTruncUwS; + TruncateKind kind = OpParameter<TruncateKind>(node->op()); + if (kind == TruncateKind::kSetOverflowToMin) { + opcode |= MiscField::encode(true); + } + + Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) { + MipsOperandGenerator g(this); + Node* value = node->InputAt(0); + // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction + // which does rounding and conversion to integer format. + if (CanCover(node, value)) { + switch (value->opcode()) { + case IrOpcode::kFloat64RoundDown: + Emit(kMipsFloorWD, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + case IrOpcode::kFloat64RoundUp: + Emit(kMipsCeilWD, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + case IrOpcode::kFloat64RoundTiesEven: + Emit(kMipsRoundWD, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + case IrOpcode::kFloat64RoundTruncate: + Emit(kMipsTruncWD, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + default: + break; + } + if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) { + Node* next = value->InputAt(0); + if (CanCover(value, next)) { + // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP)) + switch (next->opcode()) { + case IrOpcode::kFloat32RoundDown: + Emit(kMipsFloorWS, g.DefineAsRegister(node), + g.UseRegister(next->InputAt(0))); + return; + case IrOpcode::kFloat32RoundUp: + Emit(kMipsCeilWS, g.DefineAsRegister(node), + g.UseRegister(next->InputAt(0))); + return; + case IrOpcode::kFloat32RoundTiesEven: + Emit(kMipsRoundWS, g.DefineAsRegister(node), + g.UseRegister(next->InputAt(0))); + return; + case IrOpcode::kFloat32RoundTruncate: + Emit(kMipsTruncWS, g.DefineAsRegister(node), + g.UseRegister(next->InputAt(0))); + return; + default: + Emit(kMipsTruncWS, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + } + } else { + // Match float32 -> float64 -> int32 representation change path. + Emit(kMipsTruncWS, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + } + } + } + VisitRR(this, kMipsTruncWD, node); +} + +void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { + VisitRR(this, kMipsTruncUwD, node); +} + +void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) { + VisitRR(this, kMipsTruncUwD, node); +} + +void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { + MipsOperandGenerator g(this); + Node* value = node->InputAt(0); + // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding + // instruction. 
+ if (CanCover(node, value) && + value->opcode() == IrOpcode::kChangeInt32ToFloat64) { + Emit(kMipsCvtSW, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + } + VisitRR(this, kMipsCvtSD, node); +} + +void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) { + VisitRR(this, kArchTruncateDoubleToI, node); +} + +void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) { + VisitRR(this, kMipsTruncWD, node); +} + +void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) { + VisitRR(this, kMipsFloat64ExtractLowWord32, node); +} + +void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node), + ImmediateOperand(ImmediateOperand::INLINE_INT32, 0), + g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitFloat32Add(Node* node) { + MipsOperandGenerator g(this); + if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y). + Float32BinopMatcher m(node); + if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) { + // For Add.S(Mul.S(x, y), z): + Float32BinopMatcher mleft(m.left().node()); + Emit(kMipsMaddS, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) { + // For Add.S(x, Mul.S(y, z)): + Float32BinopMatcher mright(m.right().node()); + Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + } + VisitRRR(this, kMipsAddS, node); +} + +void InstructionSelector::VisitFloat64Add(Node* node) { + MipsOperandGenerator g(this); + if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y). + Float64BinopMatcher m(node); + if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { + // For Add.D(Mul.D(x, y), z): + Float64BinopMatcher mleft(m.left().node()); + Emit(kMipsMaddD, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { + // For Add.D(x, Mul.D(y, z)): + Float64BinopMatcher mright(m.right().node()); + Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(mright.left().node()), + g.UseRegister(mright.right().node())); + return; + } + } + VisitRRR(this, kMipsAddD, node); +} + +void InstructionSelector::VisitFloat32Sub(Node* node) { + MipsOperandGenerator g(this); + if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y). + Float32BinopMatcher m(node); + if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) { + // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y). + Float32BinopMatcher mleft(m.left().node()); + Emit(kMipsMsubS, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } + VisitRRR(this, kMipsSubS, node); +} + +void InstructionSelector::VisitFloat64Sub(Node* node) { + MipsOperandGenerator g(this); + if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y). + Float64BinopMatcher m(node); + if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { + // For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y). 
+ Float64BinopMatcher mleft(m.left().node()); + Emit(kMipsMsubD, g.DefineAsRegister(node), + g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } + VisitRRR(this, kMipsSubD, node); +} + +void InstructionSelector::VisitFloat32Mul(Node* node) { + VisitRRR(this, kMipsMulS, node); +} + +void InstructionSelector::VisitFloat64Mul(Node* node) { + VisitRRR(this, kMipsMulD, node); +} + +void InstructionSelector::VisitFloat32Div(Node* node) { + VisitRRR(this, kMipsDivS, node); +} + +void InstructionSelector::VisitFloat64Div(Node* node) { + VisitRRR(this, kMipsDivD, node); +} + +void InstructionSelector::VisitFloat64Mod(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12), + g.UseFixed(node->InputAt(1), f14)) + ->MarkAsCall(); +} + +void InstructionSelector::VisitFloat32Max(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsFloat32Max, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); +} + +void InstructionSelector::VisitFloat64Max(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsFloat64Max, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); +} + +void InstructionSelector::VisitFloat32Min(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsFloat32Min, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); +} + +void InstructionSelector::VisitFloat64Min(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsFloat64Min, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); +} + +void InstructionSelector::VisitFloat32Abs(Node* node) { + VisitRR(this, kMipsAbsS, node); +} + +void InstructionSelector::VisitFloat64Abs(Node* node) { + VisitRR(this, kMipsAbsD, node); +} + +void InstructionSelector::VisitFloat32Sqrt(Node* node) { + VisitRR(this, kMipsSqrtS, node); +} + +void InstructionSelector::VisitFloat64Sqrt(Node* node) { + VisitRR(this, kMipsSqrtD, node); +} + +void InstructionSelector::VisitFloat32RoundDown(Node* node) { + VisitRR(this, kMipsFloat32RoundDown, node); +} + +void InstructionSelector::VisitFloat64RoundDown(Node* node) { + VisitRR(this, kMipsFloat64RoundDown, node); +} + +void InstructionSelector::VisitFloat32RoundUp(Node* node) { + VisitRR(this, kMipsFloat32RoundUp, node); +} + +void InstructionSelector::VisitFloat64RoundUp(Node* node) { + VisitRR(this, kMipsFloat64RoundUp, node); +} + +void InstructionSelector::VisitFloat32RoundTruncate(Node* node) { + VisitRR(this, kMipsFloat32RoundTruncate, node); +} + +void InstructionSelector::VisitFloat64RoundTruncate(Node* node) { + VisitRR(this, kMipsFloat64RoundTruncate, node); +} + +void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) { + UNREACHABLE(); +} + +void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) { + VisitRR(this, kMipsFloat32RoundTiesEven, node); +} + +void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) { + VisitRR(this, kMipsFloat64RoundTiesEven, node); +} + +void InstructionSelector::VisitFloat32Neg(Node* node) { + VisitRR(this, kMipsNegS, node); +} + +void InstructionSelector::VisitFloat64Neg(Node* node) { + VisitRR(this, kMipsNegD, node); +} + +void InstructionSelector::VisitFloat64Ieee754Binop(Node* node, + InstructionCode opcode) { + MipsOperandGenerator g(this); + Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2), + 
g.UseFixed(node->InputAt(1), f4)) + ->MarkAsCall(); +} + +void InstructionSelector::VisitFloat64Ieee754Unop(Node* node, + InstructionCode opcode) { + MipsOperandGenerator g(this); + Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12)) + ->MarkAsCall(); +} + +void InstructionSelector::EmitPrepareArguments( + ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor, + Node* node) { + MipsOperandGenerator g(this); + + // Prepare for C function call. + if (call_descriptor->IsCFunctionCall()) { + Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>( + call_descriptor->ParameterCount())), + 0, nullptr, 0, nullptr); + + // Poke any stack arguments. + int slot = kCArgSlotCount; + for (PushParameter input : (*arguments)) { + if (input.node) { + Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), + g.TempImmediate(slot << kSystemPointerSizeLog2)); + ++slot; + } + } + } else { + // Possibly align stack here for functions. + int push_count = static_cast<int>(call_descriptor->ParameterSlotCount()); + if (push_count > 0) { + // Calculate needed space + int stack_size = 0; + for (size_t n = 0; n < arguments->size(); ++n) { + PushParameter input = (*arguments)[n]; + if (input.node) { + stack_size += input.location.GetSizeInPointers(); + } + } + Emit(kMipsStackClaim, g.NoOutput(), + g.TempImmediate(stack_size << kSystemPointerSizeLog2)); + } + for (size_t n = 0; n < arguments->size(); ++n) { + PushParameter input = (*arguments)[n]; + if (input.node) { + Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), + g.TempImmediate(n << kSystemPointerSizeLog2)); + } + } + } +} + +void InstructionSelector::EmitPrepareResults( + ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor, + Node* node) { + MipsOperandGenerator g(this); + + for (PushParameter output : *results) { + if (!output.location.IsCallerFrameSlot()) continue; + // Skip any alignment holes in nodes. + if (output.node != nullptr) { + DCHECK(!call_descriptor->IsCFunctionCall()); + if (output.location.GetType() == MachineType::Float32()) { + MarkAsFloat32(output.node); + } else if (output.location.GetType() == MachineType::Float64()) { + MarkAsFloat64(output.node); + } else if (output.location.GetType() == MachineType::Simd128()) { + MarkAsSimd128(output.node); + } + int offset = call_descriptor->GetOffsetToReturns(); + int reverse_slot = -output.location.GetLocation() - offset; + Emit(kMipsPeek, g.DefineAsRegister(output.node), + g.UseImmediate(reverse_slot)); + } + } +} + +bool InstructionSelector::IsTailCallAddressImmediate() { return false; } + +void InstructionSelector::VisitUnalignedLoad(Node* node) { + LoadRepresentation load_rep = LoadRepresentationOf(node->op()); + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + ArchOpcode opcode; + switch (load_rep.representation()) { + case MachineRepresentation::kWord8: + opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb; + break; + case MachineRepresentation::kWord16: + opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh; + break; + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: // Fall through. 
+ case MachineRepresentation::kWord32: + opcode = kMipsUlw; + break; + case MachineRepresentation::kFloat32: + opcode = kMipsUlwc1; + break; + case MachineRepresentation::kFloat64: + opcode = kMipsUldc1; + break; + case MachineRepresentation::kSimd128: + opcode = kMipsMsaLd; + break; + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kCompressedPointer: // Fall through. + case MachineRepresentation::kCompressed: // Fall through. + case MachineRepresentation::kSandboxedPointer: // Fall through. + case MachineRepresentation::kWord64: // Fall through. + case MachineRepresentation::kMapWord: // Fall through. + case MachineRepresentation::kNone: + UNREACHABLE(); + } + + if (g.CanBeImmediate(index, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, + g.UseRegister(index), g.UseRegister(base)); + // Emit desired load opcode, using temp addr_reg. + Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); + } +} + +void InstructionSelector::VisitUnalignedStore(Node* node) { + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op()); + + // TODO(mips): I guess this could be done in a better way. + ArchOpcode opcode; + switch (rep) { + case MachineRepresentation::kFloat32: + opcode = kMipsUswc1; + break; + case MachineRepresentation::kFloat64: + opcode = kMipsUsdc1; + break; + case MachineRepresentation::kWord8: + opcode = kMipsSb; + break; + case MachineRepresentation::kWord16: + opcode = kMipsUsh; + break; + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: // Fall through. + case MachineRepresentation::kWord32: + opcode = kMipsUsw; + break; + case MachineRepresentation::kSimd128: + opcode = kMipsMsaSt; + break; + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kCompressedPointer: // Fall through. + case MachineRepresentation::kCompressed: // Fall through. + case MachineRepresentation::kSandboxedPointer: // Fall through. + case MachineRepresentation::kWord64: // Fall through. + case MachineRepresentation::kMapWord: // Fall through. + case MachineRepresentation::kNone: + UNREACHABLE(); + } + + if (g.CanBeImmediate(index, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + g.UseRegister(base), g.UseImmediate(index), + g.UseRegisterOrImmediateZero(value)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, + g.UseRegister(index), g.UseRegister(base)); + // Emit desired store opcode, using temp addr_reg. + Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value)); + } +} + +namespace { +// Shared routine for multiple compare operations. +static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, + InstructionOperand left, InstructionOperand right, + FlagsContinuation* cont) { + selector->EmitWithContinuation(opcode, left, right, cont); +} + +// Shared routine for multiple float32 compare operations. 
+void VisitFloat32Compare(InstructionSelector* selector, Node* node, + FlagsContinuation* cont) { + MipsOperandGenerator g(selector); + Float32BinopMatcher m(node); + InstructionOperand lhs, rhs; + + lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) + : g.UseRegister(m.left().node()); + rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) + : g.UseRegister(m.right().node()); + VisitCompare(selector, kMipsCmpS, lhs, rhs, cont); +} + +// Shared routine for multiple float64 compare operations. +void VisitFloat64Compare(InstructionSelector* selector, Node* node, + FlagsContinuation* cont) { + MipsOperandGenerator g(selector); + Float64BinopMatcher m(node); + InstructionOperand lhs, rhs; + + lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) + : g.UseRegister(m.left().node()); + rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) + : g.UseRegister(m.right().node()); + VisitCompare(selector, kMipsCmpD, lhs, rhs, cont); +} + +// Shared routine for multiple word compare operations. +void VisitWordCompare(InstructionSelector* selector, Node* node, + InstructionCode opcode, FlagsContinuation* cont, + bool commutative) { + MipsOperandGenerator g(selector); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + + // Match immediates on left or right side of comparison. + if (g.CanBeImmediate(right, opcode)) { + if (opcode == kMipsTst) { + VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), + cont); + } else { + switch (cont->condition()) { + case kEqual: + case kNotEqual: + if (cont->IsSet()) { + VisitCompare(selector, opcode, g.UseRegister(left), + g.UseImmediate(right), cont); + } else { + VisitCompare(selector, opcode, g.UseRegister(left), + g.UseRegister(right), cont); + } + break; + case kSignedLessThan: + case kSignedGreaterThanOrEqual: + case kUnsignedLessThan: + case kUnsignedGreaterThanOrEqual: + VisitCompare(selector, opcode, g.UseRegister(left), + g.UseImmediate(right), cont); + break; + default: + VisitCompare(selector, opcode, g.UseRegister(left), + g.UseRegister(right), cont); + } + } + } else if (g.CanBeImmediate(left, opcode)) { + if (!commutative) cont->Commute(); + if (opcode == kMipsTst) { + VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left), + cont); + } else { + switch (cont->condition()) { + case kEqual: + case kNotEqual: + if (cont->IsSet()) { + VisitCompare(selector, opcode, g.UseRegister(right), + g.UseImmediate(left), cont); + } else { + VisitCompare(selector, opcode, g.UseRegister(right), + g.UseRegister(left), cont); + } + break; + case kSignedLessThan: + case kSignedGreaterThanOrEqual: + case kUnsignedLessThan: + case kUnsignedGreaterThanOrEqual: + VisitCompare(selector, opcode, g.UseRegister(right), + g.UseImmediate(left), cont); + break; + default: + VisitCompare(selector, opcode, g.UseRegister(right), + g.UseRegister(left), cont); + } + } + } else { + VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), + cont); + } +} + +void VisitWordCompare(InstructionSelector* selector, Node* node, + FlagsContinuation* cont) { + VisitWordCompare(selector, node, kMipsCmp, cont, false); +} + +} // namespace + +void InstructionSelector::VisitStackPointerGreaterThan( + Node* node, FlagsContinuation* cont) { + StackCheckKind kind = StackCheckKindOf(node->op()); + InstructionCode opcode = + kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind)); + + MipsOperandGenerator g(this); + + // No outputs. 
+ InstructionOperand* const outputs = nullptr; + const int output_count = 0; + + // TempRegister(0) is used to store the comparison result. + // Applying an offset to this stack check requires a temp register. Offsets + // are only applied to the first stack check. If applying an offset, we must + // ensure the input and temp registers do not alias, thus kUniqueRegister. + InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; + const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1); + const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry) + ? OperandGenerator::kUniqueRegister + : OperandGenerator::kRegister; + + Node* const value = node->InputAt(0); + InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)}; + static constexpr int input_count = arraysize(inputs); + + EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, + temp_count, temps, cont); +} + +// Shared routine for word comparisons against zero. +void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, + FlagsContinuation* cont) { + // Try to combine with comparisons against 0 by simply inverting the branch. + while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) { + Int32BinopMatcher m(value); + if (!m.right().Is(0)) break; + + user = value; + value = m.left().node(); + cont->Negate(); + } + + if (CanCover(user, value)) { + switch (value->opcode()) { + case IrOpcode::kWord32Equal: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitWordCompare(this, value, cont); + case IrOpcode::kInt32LessThan: + cont->OverwriteAndNegateIfEqual(kSignedLessThan); + return VisitWordCompare(this, value, cont); + case IrOpcode::kInt32LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); + return VisitWordCompare(this, value, cont); + case IrOpcode::kUint32LessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitWordCompare(this, value, cont); + case IrOpcode::kUint32LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitWordCompare(this, value, cont); + case IrOpcode::kFloat32Equal: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitFloat32Compare(this, value, cont); + case IrOpcode::kFloat32LessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitFloat32Compare(this, value, cont); + case IrOpcode::kFloat32LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitFloat32Compare(this, value, cont); + case IrOpcode::kFloat64Equal: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitFloat64Compare(this, value, cont); + case IrOpcode::kFloat64LessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitFloat64Compare(this, value, cont); + case IrOpcode::kFloat64LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitFloat64Compare(this, value, cont); + case IrOpcode::kProjection: + // Check if this is the overflow output projection of an + // <Operation>WithOverflow node. + if (ProjectionIndexOf(value->op()) == 1u) { + // We cannot combine the <Operation>WithOverflow with this branch + // unless the 0th projection (the use of the actual value of the + // <Operation> is either nullptr, which means there's no use of the + // actual value, or was already defined, which means it is scheduled + // *AFTER* this branch). 
+ Node* const node = value->InputAt(0); + Node* const result = NodeProperties::FindProjection(node, 0); + if (!result || IsDefined(result)) { + switch (node->opcode()) { + case IrOpcode::kInt32AddWithOverflow: + cont->OverwriteAndNegateIfEqual(kOverflow); + return VisitBinop(this, node, kMipsAddOvf, cont); + case IrOpcode::kInt32SubWithOverflow: + cont->OverwriteAndNegateIfEqual(kOverflow); + return VisitBinop(this, node, kMipsSubOvf, cont); + case IrOpcode::kInt32MulWithOverflow: + cont->OverwriteAndNegateIfEqual(kOverflow); + return VisitBinop(this, node, kMipsMulOvf, cont); + default: + break; + } + } + } + break; + case IrOpcode::kWord32And: + return VisitWordCompare(this, value, kMipsTst, cont, true); + case IrOpcode::kStackPointerGreaterThan: + cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); + return VisitStackPointerGreaterThan(value, cont); + default: + break; + } + } + + // Continuation could not be combined with a compare, emit compare against 0. + MipsOperandGenerator g(this); + InstructionOperand const value_operand = g.UseRegister(value); + EmitWithContinuation(kMipsCmp, value_operand, g.TempImmediate(0), cont); +} + +void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) { + MipsOperandGenerator g(this); + InstructionOperand value_operand = g.UseRegister(node->InputAt(0)); + + // Emit either ArchTableSwitch or ArchBinarySearchSwitch. + if (enable_switch_jump_table_ == kEnableSwitchJumpTable) { + static const size_t kMaxTableSwitchValueRange = 2 << 16; + size_t table_space_cost = 9 + sw.value_range(); + size_t table_time_cost = 3; + size_t lookup_space_cost = 2 + 2 * sw.case_count(); + size_t lookup_time_cost = sw.case_count(); + if (sw.case_count() > 0 && + table_space_cost + 3 * table_time_cost <= + lookup_space_cost + 3 * lookup_time_cost && + sw.min_value() > std::numeric_limits<int32_t>::min() && + sw.value_range() <= kMaxTableSwitchValueRange) { + InstructionOperand index_operand = value_operand; + if (sw.min_value()) { + index_operand = g.TempRegister(); + Emit(kMipsSub, index_operand, value_operand, + g.TempImmediate(sw.min_value())); + } + // Generate a table lookup. + return EmitTableSwitch(sw, index_operand); + } + } + + // Generate a tree of conditional jumps. 
+ return EmitBinarySearchSwitch(std::move(sw), value_operand); +} + +void InstructionSelector::VisitWord32Equal(Node* const node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); + Int32BinopMatcher m(node); + if (m.right().Is(0)) { + return VisitWordCompareZero(m.node(), m.left().node(), &cont); + } + VisitWordCompare(this, node, &cont); +} + +void InstructionSelector::VisitInt32LessThan(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); + VisitWordCompare(this, node, &cont); +} + +void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) { + FlagsContinuation cont = + FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); + VisitWordCompare(this, node, &cont); +} + +void InstructionSelector::VisitUint32LessThan(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); + VisitWordCompare(this, node, &cont); +} + +void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { + FlagsContinuation cont = + FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); + VisitWordCompare(this, node, &cont); +} + +void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { + if (Node* ovf = NodeProperties::FindProjection(node, 1)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kMipsAddOvf, &cont); + } + FlagsContinuation cont; + VisitBinop(this, node, kMipsAddOvf, &cont); +} + +void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { + if (Node* ovf = NodeProperties::FindProjection(node, 1)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kMipsSubOvf, &cont); + } + FlagsContinuation cont; + VisitBinop(this, node, kMipsSubOvf, &cont); +} + +void InstructionSelector::VisitInt32MulWithOverflow(Node* node) { + if (Node* ovf = NodeProperties::FindProjection(node, 1)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kMipsMulOvf, &cont); + } + FlagsContinuation cont; + VisitBinop(this, node, kMipsMulOvf, &cont); +} + +void InstructionSelector::VisitFloat32Equal(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); + VisitFloat32Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat32LessThan(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); + VisitFloat32Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) { + FlagsContinuation cont = + FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); + VisitFloat32Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat64Equal(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); + VisitFloat64Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat64LessThan(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); + VisitFloat64Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { + FlagsContinuation cont = + FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); + VisitFloat64Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) { + 
MipsOperandGenerator g(this); + Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) { + MipsOperandGenerator g(this); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node), + g.UseRegister(left), g.UseRegister(right)); +} + +void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { + MipsOperandGenerator g(this); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node), + g.UseRegister(left), g.UseRegister(right)); +} + +void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { + MipsOperandGenerator g(this); + Node* left = node->InputAt(0); + InstructionOperand temps[] = {g.TempRegister()}; + Emit(kMipsFloat64SilenceNaN, g.DefineSameAsFirst(node), g.UseRegister(left), + arraysize(temps), temps); +} + +void InstructionSelector::VisitMemoryBarrier(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsSync, g.NoOutput()); +} + +void InstructionSelector::VisitWord32AtomicLoad(Node* node) { + // TODO(mips-dev): Confirm whether there is any mips32 chip in use and + // support atomic loads of tagged values with barriers. + AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op()); + LoadRepresentation load_rep = atomic_load_params.representation(); + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + ArchOpcode opcode; + switch (load_rep.representation()) { + case MachineRepresentation::kWord8: + opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8; + break; + case MachineRepresentation::kWord16: + opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16; + break; + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: + case MachineRepresentation::kWord32: + opcode = kAtomicLoadWord32; + break; + default: + UNREACHABLE(); + } + + if (g.CanBeImmediate(index, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, + g.UseRegister(index), g.UseRegister(base)); + // Emit desired load opcode, using temp addr_reg. + Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); + } +} + +void InstructionSelector::VisitWord32AtomicStore(Node* node) { + // TODO(mips-dev): Confirm whether there is any mips32 chip in use and + // support atomic stores of tagged values with barriers. + AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op()); + MachineRepresentation rep = store_params.representation(); + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + ArchOpcode opcode; + switch (rep) { + case MachineRepresentation::kWord8: + opcode = kAtomicStoreWord8; + break; + case MachineRepresentation::kWord16: + opcode = kAtomicStoreWord16; + break; + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. 
+ case MachineRepresentation::kTagged: + case MachineRepresentation::kWord32: + opcode = kAtomicStoreWord32; + break; + default: + UNREACHABLE(); + } + + if (g.CanBeImmediate(index, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + g.UseRegister(base), g.UseImmediate(index), + g.UseRegisterOrImmediateZero(value)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, + g.UseRegister(index), g.UseRegister(base)); + // Emit desired store opcode, using temp addr_reg. + Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value)); + } +} + +void InstructionSelector::VisitWord32AtomicExchange(Node* node) { + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + ArchOpcode opcode; + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Int8()) { + opcode = kAtomicExchangeInt8; + } else if (type == MachineType::Uint8()) { + opcode = kAtomicExchangeUint8; + } else if (type == MachineType::Int16()) { + opcode = kAtomicExchangeInt16; + } else if (type == MachineType::Uint16()) { + opcode = kAtomicExchangeUint16; + } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { + opcode = kAtomicExchangeWord32; + } else { + UNREACHABLE(); + } + + AddressingMode addressing_mode = kMode_MRI; + InstructionOperand inputs[3]; + size_t input_count = 0; + inputs[input_count++] = g.UseUniqueRegister(base); + inputs[input_count++] = g.UseUniqueRegister(index); + inputs[input_count++] = g.UseUniqueRegister(value); + InstructionOperand outputs[1]; + outputs[0] = g.UseUniqueRegister(node); + InstructionOperand temp[3]; + temp[0] = g.TempRegister(); + temp[1] = g.TempRegister(); + temp[2] = g.TempRegister(); + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); + Emit(code, 1, outputs, input_count, inputs, 3, temp); +} + +void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) { + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* old_value = node->InputAt(2); + Node* new_value = node->InputAt(3); + ArchOpcode opcode; + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Int8()) { + opcode = kAtomicCompareExchangeInt8; + } else if (type == MachineType::Uint8()) { + opcode = kAtomicCompareExchangeUint8; + } else if (type == MachineType::Int16()) { + opcode = kAtomicCompareExchangeInt16; + } else if (type == MachineType::Uint16()) { + opcode = kAtomicCompareExchangeUint16; + } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { + opcode = kAtomicCompareExchangeWord32; + } else { + UNREACHABLE(); + } + + AddressingMode addressing_mode = kMode_MRI; + InstructionOperand inputs[4]; + size_t input_count = 0; + inputs[input_count++] = g.UseUniqueRegister(base); + inputs[input_count++] = g.UseUniqueRegister(index); + inputs[input_count++] = g.UseUniqueRegister(old_value); + inputs[input_count++] = g.UseUniqueRegister(new_value); + InstructionOperand outputs[1]; + outputs[0] = g.UseUniqueRegister(node); + InstructionOperand temp[3]; + temp[0] = g.TempRegister(); + temp[1] = g.TempRegister(); + temp[2] = g.TempRegister(); + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); + Emit(code, 1, outputs, input_count, inputs, 3, temp); +} + +void 
InstructionSelector::VisitWord32AtomicBinaryOperation( + Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, + ArchOpcode uint16_op, ArchOpcode word32_op) { + MipsOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + ArchOpcode opcode; + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Int8()) { + opcode = int8_op; + } else if (type == MachineType::Uint8()) { + opcode = uint8_op; + } else if (type == MachineType::Int16()) { + opcode = int16_op; + } else if (type == MachineType::Uint16()) { + opcode = uint16_op; + } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { + opcode = word32_op; + } else { + UNREACHABLE(); + } + + AddressingMode addressing_mode = kMode_MRI; + InstructionOperand inputs[3]; + size_t input_count = 0; + inputs[input_count++] = g.UseUniqueRegister(base); + inputs[input_count++] = g.UseUniqueRegister(index); + inputs[input_count++] = g.UseUniqueRegister(value); + InstructionOperand outputs[1]; + outputs[0] = g.UseUniqueRegister(node); + InstructionOperand temps[4]; + temps[0] = g.TempRegister(); + temps[1] = g.TempRegister(); + temps[2] = g.TempRegister(); + temps[3] = g.TempRegister(); + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); + Emit(code, 1, outputs, input_count, inputs, 4, temps); +} + +#define VISIT_ATOMIC_BINOP(op) \ + void InstructionSelector::VisitWord32Atomic##op(Node* node) { \ + VisitWord32AtomicBinaryOperation( \ + node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \ + kAtomic##op##Uint16, kAtomic##op##Word32); \ + } +VISIT_ATOMIC_BINOP(Add) +VISIT_ATOMIC_BINOP(Sub) +VISIT_ATOMIC_BINOP(And) +VISIT_ATOMIC_BINOP(Or) +VISIT_ATOMIC_BINOP(Xor) +#undef VISIT_ATOMIC_BINOP + +void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { + UNREACHABLE(); +} + +void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { + UNREACHABLE(); +} + +#define SIMD_TYPE_LIST(V) \ + V(F32x4) \ + V(I32x4) \ + V(I16x8) \ + V(I8x16) + +#define SIMD_UNOP_LIST(V) \ + V(F64x2Abs, kMipsF64x2Abs) \ + V(F64x2Neg, kMipsF64x2Neg) \ + V(F64x2Sqrt, kMipsF64x2Sqrt) \ + V(F64x2Ceil, kMipsF64x2Ceil) \ + V(F64x2Floor, kMipsF64x2Floor) \ + V(F64x2Trunc, kMipsF64x2Trunc) \ + V(F64x2NearestInt, kMipsF64x2NearestInt) \ + V(F64x2ConvertLowI32x4S, kMipsF64x2ConvertLowI32x4S) \ + V(F64x2ConvertLowI32x4U, kMipsF64x2ConvertLowI32x4U) \ + V(F64x2PromoteLowF32x4, kMipsF64x2PromoteLowF32x4) \ + V(I64x2Neg, kMipsI64x2Neg) \ + V(I64x2BitMask, kMipsI64x2BitMask) \ + V(I64x2Abs, kMipsI64x2Abs) \ + V(I64x2SConvertI32x4Low, kMipsI64x2SConvertI32x4Low) \ + V(I64x2SConvertI32x4High, kMipsI64x2SConvertI32x4High) \ + V(I64x2UConvertI32x4Low, kMipsI64x2UConvertI32x4Low) \ + V(I64x2UConvertI32x4High, kMipsI64x2UConvertI32x4High) \ + V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \ + V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \ + V(F32x4Abs, kMipsF32x4Abs) \ + V(F32x4Neg, kMipsF32x4Neg) \ + V(F32x4Sqrt, kMipsF32x4Sqrt) \ + V(F32x4RecipApprox, kMipsF32x4RecipApprox) \ + V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \ + V(F32x4Ceil, kMipsF32x4Ceil) \ + V(F32x4Floor, kMipsF32x4Floor) \ + V(F32x4Trunc, kMipsF32x4Trunc) \ + V(F32x4NearestInt, kMipsF32x4NearestInt) \ + V(F32x4DemoteF64x2Zero, kMipsF32x4DemoteF64x2Zero) \ + V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \ + V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \ + V(I32x4Neg, kMipsI32x4Neg) \ + V(I32x4BitMask, kMipsI32x4BitMask) \ + V(I32x4SConvertI16x8Low, 
kMipsI32x4SConvertI16x8Low) \ + V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \ + V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low) \ + V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \ + V(I32x4ExtAddPairwiseI16x8S, kMipsI32x4ExtAddPairwiseI16x8S) \ + V(I32x4ExtAddPairwiseI16x8U, kMipsI32x4ExtAddPairwiseI16x8U) \ + V(I32x4TruncSatF64x2SZero, kMipsI32x4TruncSatF64x2SZero) \ + V(I32x4TruncSatF64x2UZero, kMipsI32x4TruncSatF64x2UZero) \ + V(I16x8Neg, kMipsI16x8Neg) \ + V(I16x8BitMask, kMipsI16x8BitMask) \ + V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low) \ + V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \ + V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low) \ + V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \ + V(I16x8ExtAddPairwiseI8x16S, kMipsI16x8ExtAddPairwiseI8x16S) \ + V(I16x8ExtAddPairwiseI8x16U, kMipsI16x8ExtAddPairwiseI8x16U) \ + V(I8x16Neg, kMipsI8x16Neg) \ + V(I8x16Popcnt, kMipsI8x16Popcnt) \ + V(I8x16BitMask, kMipsI8x16BitMask) \ + V(S128Not, kMipsS128Not) \ + V(I64x2AllTrue, kMipsI64x2AllTrue) \ + V(I32x4AllTrue, kMipsI32x4AllTrue) \ + V(I16x8AllTrue, kMipsI16x8AllTrue) \ + V(I8x16AllTrue, kMipsI8x16AllTrue) \ + V(V128AnyTrue, kMipsV128AnyTrue) + +#define SIMD_SHIFT_OP_LIST(V) \ + V(I64x2Shl) \ + V(I64x2ShrS) \ + V(I64x2ShrU) \ + V(I32x4Shl) \ + V(I32x4ShrS) \ + V(I32x4ShrU) \ + V(I16x8Shl) \ + V(I16x8ShrS) \ + V(I16x8ShrU) \ + V(I8x16Shl) \ + V(I8x16ShrS) \ + V(I8x16ShrU) + +#define SIMD_BINOP_LIST(V) \ + V(F64x2Add, kMipsF64x2Add) \ + V(F64x2Sub, kMipsF64x2Sub) \ + V(F64x2Mul, kMipsF64x2Mul) \ + V(F64x2Div, kMipsF64x2Div) \ + V(F64x2Min, kMipsF64x2Min) \ + V(F64x2Max, kMipsF64x2Max) \ + V(F64x2Eq, kMipsF64x2Eq) \ + V(F64x2Ne, kMipsF64x2Ne) \ + V(F64x2Lt, kMipsF64x2Lt) \ + V(F64x2Le, kMipsF64x2Le) \ + V(I64x2Eq, kMipsI64x2Eq) \ + V(I64x2Ne, kMipsI64x2Ne) \ + V(I64x2Add, kMipsI64x2Add) \ + V(I64x2Sub, kMipsI64x2Sub) \ + V(I64x2Mul, kMipsI64x2Mul) \ + V(I64x2GtS, kMipsI64x2GtS) \ + V(I64x2GeS, kMipsI64x2GeS) \ + V(I64x2ExtMulLowI32x4S, kMipsI64x2ExtMulLowI32x4S) \ + V(I64x2ExtMulHighI32x4S, kMipsI64x2ExtMulHighI32x4S) \ + V(I64x2ExtMulLowI32x4U, kMipsI64x2ExtMulLowI32x4U) \ + V(I64x2ExtMulHighI32x4U, kMipsI64x2ExtMulHighI32x4U) \ + V(F32x4Add, kMipsF32x4Add) \ + V(F32x4Sub, kMipsF32x4Sub) \ + V(F32x4Mul, kMipsF32x4Mul) \ + V(F32x4Div, kMipsF32x4Div) \ + V(F32x4Max, kMipsF32x4Max) \ + V(F32x4Min, kMipsF32x4Min) \ + V(F32x4Eq, kMipsF32x4Eq) \ + V(F32x4Ne, kMipsF32x4Ne) \ + V(F32x4Lt, kMipsF32x4Lt) \ + V(F32x4Le, kMipsF32x4Le) \ + V(I32x4Add, kMipsI32x4Add) \ + V(I32x4Sub, kMipsI32x4Sub) \ + V(I32x4Mul, kMipsI32x4Mul) \ + V(I32x4MaxS, kMipsI32x4MaxS) \ + V(I32x4MinS, kMipsI32x4MinS) \ + V(I32x4MaxU, kMipsI32x4MaxU) \ + V(I32x4MinU, kMipsI32x4MinU) \ + V(I32x4Eq, kMipsI32x4Eq) \ + V(I32x4Ne, kMipsI32x4Ne) \ + V(I32x4GtS, kMipsI32x4GtS) \ + V(I32x4GeS, kMipsI32x4GeS) \ + V(I32x4GtU, kMipsI32x4GtU) \ + V(I32x4GeU, kMipsI32x4GeU) \ + V(I32x4Abs, kMipsI32x4Abs) \ + V(I32x4DotI16x8S, kMipsI32x4DotI16x8S) \ + V(I32x4ExtMulLowI16x8S, kMipsI32x4ExtMulLowI16x8S) \ + V(I32x4ExtMulHighI16x8S, kMipsI32x4ExtMulHighI16x8S) \ + V(I32x4ExtMulLowI16x8U, kMipsI32x4ExtMulLowI16x8U) \ + V(I32x4ExtMulHighI16x8U, kMipsI32x4ExtMulHighI16x8U) \ + V(I16x8Add, kMipsI16x8Add) \ + V(I16x8AddSatS, kMipsI16x8AddSatS) \ + V(I16x8AddSatU, kMipsI16x8AddSatU) \ + V(I16x8Sub, kMipsI16x8Sub) \ + V(I16x8SubSatS, kMipsI16x8SubSatS) \ + V(I16x8SubSatU, kMipsI16x8SubSatU) \ + V(I16x8Mul, kMipsI16x8Mul) \ + V(I16x8MaxS, kMipsI16x8MaxS) \ + V(I16x8MinS, kMipsI16x8MinS) \ + 
V(I16x8MaxU, kMipsI16x8MaxU) \ + V(I16x8MinU, kMipsI16x8MinU) \ + V(I16x8Eq, kMipsI16x8Eq) \ + V(I16x8Ne, kMipsI16x8Ne) \ + V(I16x8GtS, kMipsI16x8GtS) \ + V(I16x8GeS, kMipsI16x8GeS) \ + V(I16x8GtU, kMipsI16x8GtU) \ + V(I16x8GeU, kMipsI16x8GeU) \ + V(I16x8SConvertI32x4, kMipsI16x8SConvertI32x4) \ + V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4) \ + V(I16x8Q15MulRSatS, kMipsI16x8Q15MulRSatS) \ + V(I16x8ExtMulLowI8x16S, kMipsI16x8ExtMulLowI8x16S) \ + V(I16x8ExtMulHighI8x16S, kMipsI16x8ExtMulHighI8x16S) \ + V(I16x8ExtMulLowI8x16U, kMipsI16x8ExtMulLowI8x16U) \ + V(I16x8ExtMulHighI8x16U, kMipsI16x8ExtMulHighI8x16U) \ + V(I16x8RoundingAverageU, kMipsI16x8RoundingAverageU) \ + V(I16x8Abs, kMipsI16x8Abs) \ + V(I8x16Add, kMipsI8x16Add) \ + V(I8x16AddSatS, kMipsI8x16AddSatS) \ + V(I8x16AddSatU, kMipsI8x16AddSatU) \ + V(I8x16Sub, kMipsI8x16Sub) \ + V(I8x16SubSatS, kMipsI8x16SubSatS) \ + V(I8x16SubSatU, kMipsI8x16SubSatU) \ + V(I8x16MaxS, kMipsI8x16MaxS) \ + V(I8x16MinS, kMipsI8x16MinS) \ + V(I8x16MaxU, kMipsI8x16MaxU) \ + V(I8x16MinU, kMipsI8x16MinU) \ + V(I8x16Eq, kMipsI8x16Eq) \ + V(I8x16Ne, kMipsI8x16Ne) \ + V(I8x16GtS, kMipsI8x16GtS) \ + V(I8x16GeS, kMipsI8x16GeS) \ + V(I8x16GtU, kMipsI8x16GtU) \ + V(I8x16GeU, kMipsI8x16GeU) \ + V(I8x16RoundingAverageU, kMipsI8x16RoundingAverageU) \ + V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8) \ + V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8) \ + V(I8x16Abs, kMipsI8x16Abs) \ + V(S128And, kMipsS128And) \ + V(S128Or, kMipsS128Or) \ + V(S128Xor, kMipsS128Xor) \ + V(S128AndNot, kMipsS128AndNot) + +void InstructionSelector::VisitS128Const(Node* node) { UNIMPLEMENTED(); } + +void InstructionSelector::VisitS128Zero(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsS128Zero, g.DefineSameAsFirst(node)); +} + +#define SIMD_VISIT_SPLAT(Type) \ + void InstructionSelector::Visit##Type##Splat(Node* node) { \ + VisitRR(this, kMips##Type##Splat, node); \ + } +SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) +SIMD_VISIT_SPLAT(F64x2) +#undef SIMD_VISIT_SPLAT + +#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \ + void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \ + VisitRRI(this, kMips##Type##ExtractLane##Sign, node); \ + } +SIMD_VISIT_EXTRACT_LANE(F64x2, ) +SIMD_VISIT_EXTRACT_LANE(F32x4, ) +SIMD_VISIT_EXTRACT_LANE(I32x4, ) +SIMD_VISIT_EXTRACT_LANE(I16x8, U) +SIMD_VISIT_EXTRACT_LANE(I16x8, S) +SIMD_VISIT_EXTRACT_LANE(I8x16, U) +SIMD_VISIT_EXTRACT_LANE(I8x16, S) +#undef SIMD_VISIT_EXTRACT_LANE + +#define SIMD_VISIT_REPLACE_LANE(Type) \ + void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \ + VisitRRIR(this, kMips##Type##ReplaceLane, node); \ + } +SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE) +SIMD_VISIT_REPLACE_LANE(F64x2) +#undef SIMD_VISIT_REPLACE_LANE + +#define SIMD_VISIT_UNOP(Name, instruction) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitRR(this, instruction, node); \ + } +SIMD_UNOP_LIST(SIMD_VISIT_UNOP) +#undef SIMD_VISIT_UNOP + +#define SIMD_VISIT_SHIFT_OP(Name) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitRRI(this, kMips##Name, node); \ + } +SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP) +#undef SIMD_VISIT_SHIFT_OP + +#define SIMD_VISIT_BINOP(Name, instruction) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitRRR(this, instruction, node); \ + } +SIMD_BINOP_LIST(SIMD_VISIT_BINOP) +#undef SIMD_VISIT_BINOP + +void InstructionSelector::VisitS128Select(Node* node) { + VisitRRRR(this, kMipsS128Select, node); +} + +#if V8_ENABLE_WEBASSEMBLY +namespace { + +struct ShuffleEntry { + uint8_t shuffle[kSimd128Size]; 
+ ArchOpcode opcode; +}; + +static const ShuffleEntry arch_shuffles[] = { + {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}, + kMipsS32x4InterleaveRight}, + {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}, + kMipsS32x4InterleaveLeft}, + {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}, + kMipsS32x4PackEven}, + {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}, + kMipsS32x4PackOdd}, + {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}, + kMipsS32x4InterleaveEven}, + {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}, + kMipsS32x4InterleaveOdd}, + + {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}, + kMipsS16x8InterleaveRight}, + {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}, + kMipsS16x8InterleaveLeft}, + {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}, + kMipsS16x8PackEven}, + {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}, + kMipsS16x8PackOdd}, + {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}, + kMipsS16x8InterleaveEven}, + {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}, + kMipsS16x8InterleaveOdd}, + {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kMipsS16x4Reverse}, + {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kMipsS16x2Reverse}, + + {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}, + kMipsS8x16InterleaveRight}, + {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}, + kMipsS8x16InterleaveLeft}, + {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}, + kMipsS8x16PackEven}, + {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}, + kMipsS8x16PackOdd}, + {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}, + kMipsS8x16InterleaveEven}, + {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}, + kMipsS8x16InterleaveOdd}, + {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kMipsS8x8Reverse}, + {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kMipsS8x4Reverse}, + {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kMipsS8x2Reverse}}; + +bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table, + size_t num_entries, bool is_swizzle, + ArchOpcode* opcode) { + uint8_t mask = is_swizzle ? 
kSimd128Size - 1 : 2 * kSimd128Size - 1; + for (size_t i = 0; i < num_entries; ++i) { + const ShuffleEntry& entry = table[i]; + int j = 0; + for (; j < kSimd128Size; ++j) { + if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) { + break; + } + } + if (j == kSimd128Size) { + *opcode = entry.opcode; + return true; + } + } + return false; +} + +} // namespace + +void InstructionSelector::VisitI8x16Shuffle(Node* node) { + uint8_t shuffle[kSimd128Size]; + bool is_swizzle; + CanonicalizeShuffle(node, shuffle, &is_swizzle); + uint8_t shuffle32x4[4]; + ArchOpcode opcode; + if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles), + is_swizzle, &opcode)) { + VisitRRR(this, opcode, node); + return; + } + Node* input0 = node->InputAt(0); + Node* input1 = node->InputAt(1); + uint8_t offset; + MipsOperandGenerator g(this); + if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) { + Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1), + g.UseRegister(input0), g.UseImmediate(offset)); + return; + } + if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) { + Emit(kMipsS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0), + g.UseRegister(input1), + g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4))); + return; + } + Emit(kMipsI8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0), + g.UseRegister(input1), + g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)), + g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)), + g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)), + g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12))); +} +#else +void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); } +#endif // V8_ENABLE_WEBASSEMBLY + +void InstructionSelector::VisitI8x16Swizzle(Node* node) { + MipsOperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + // We don't want input 0 or input 1 to be the same as output, since we will + // modify output before do the calculation. 
+ Emit(kMipsI8x16Swizzle, g.DefineAsRegister(node), + g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); +} + +void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsSeb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsSeh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitF32x4Pmin(Node* node) { + VisitUniqueRRR(this, kMipsF32x4Pmin, node); +} + +void InstructionSelector::VisitF32x4Pmax(Node* node) { + VisitUniqueRRR(this, kMipsF32x4Pmax, node); +} + +void InstructionSelector::VisitF64x2Pmin(Node* node) { + VisitUniqueRRR(this, kMipsF64x2Pmin, node); +} + +void InstructionSelector::VisitF64x2Pmax(Node* node) { + VisitUniqueRRR(this, kMipsF64x2Pmax, node); +} + +void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g, + int first_input_index, + Node* node) { + UNREACHABLE(); +} + +// static +MachineOperatorBuilder::Flags +InstructionSelector::SupportedMachineOperatorFlags() { + MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags; + if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && + IsFp64Mode()) { + flags |= MachineOperatorBuilder::kFloat64RoundDown | + MachineOperatorBuilder::kFloat64RoundUp | + MachineOperatorBuilder::kFloat64RoundTruncate | + MachineOperatorBuilder::kFloat64RoundTiesEven; + } + + return flags | MachineOperatorBuilder::kWord32Ctz | + MachineOperatorBuilder::kWord32Popcnt | + MachineOperatorBuilder::kInt32DivIsSafe | + MachineOperatorBuilder::kUint32DivIsSafe | + MachineOperatorBuilder::kWord32ShiftIsSafe | + MachineOperatorBuilder::kFloat32RoundDown | + MachineOperatorBuilder::kFloat32RoundUp | + MachineOperatorBuilder::kFloat32RoundTruncate | + MachineOperatorBuilder::kFloat32RoundTiesEven; +} + +// static +MachineOperatorBuilder::AlignmentRequirements +InstructionSelector::AlignmentRequirements() { + if (IsMipsArchVariant(kMips32r6)) { + return MachineOperatorBuilder::AlignmentRequirements:: + FullUnalignedAccessSupport(); + } else { + DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || + IsMipsArchVariant(kMips32r2)); + return MachineOperatorBuilder::AlignmentRequirements:: + NoUnalignedAccessSupport(); + } +} + +#undef SIMD_BINOP_LIST +#undef SIMD_SHIFT_OP_LIST +#undef SIMD_UNOP_LIST +#undef SIMD_TYPE_LIST +#undef TRACE_UNIMPL +#undef TRACE + +} // namespace compiler +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc new file mode 100644 index 00000000000000..69d1abb5aea980 --- /dev/null +++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc @@ -0,0 +1,4404 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/codegen/assembler-inl.h" +#include "src/codegen/callable.h" +#include "src/codegen/macro-assembler.h" +#include "src/codegen/optimized-compilation-info.h" +#include "src/codegen/riscv64/constants-riscv64.h" +#include "src/compiler/backend/code-generator-impl.h" +#include "src/compiler/backend/code-generator.h" +#include "src/compiler/backend/gap-resolver.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/osr.h" +#include "src/heap/memory-chunk.h" +#include "src/wasm/wasm-code-manager.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#define __ tasm()-> + +// TODO(plind): consider renaming these macros. +#define TRACE_MSG(msg) \ + PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ + __LINE__) + +#define TRACE_UNIMPL() \ + PrintF("UNIMPLEMENTED code_generator_riscv64: %s at line %d\n", \ + __FUNCTION__, __LINE__) + +// Adds RISC-V-specific methods to convert InstructionOperands. +class RiscvOperandConverter final : public InstructionOperandConverter { + public: + RiscvOperandConverter(CodeGenerator* gen, Instruction* instr) + : InstructionOperandConverter(gen, instr) {} + + FloatRegister OutputSingleRegister(size_t index = 0) { + return ToSingleRegister(instr_->OutputAt(index)); + } + + FloatRegister InputSingleRegister(size_t index) { + return ToSingleRegister(instr_->InputAt(index)); + } + + FloatRegister ToSingleRegister(InstructionOperand* op) { + // Single (Float) and Double register namespace is same on RISC-V, + // both are typedefs of FPURegister. + return ToDoubleRegister(op); + } + + Register InputOrZeroRegister(size_t index) { + if (instr_->InputAt(index)->IsImmediate()) { + Constant constant = ToConstant(instr_->InputAt(index)); + switch (constant.type()) { + case Constant::kInt32: + case Constant::kInt64: + DCHECK_EQ(0, InputInt32(index)); + break; + case Constant::kFloat32: + DCHECK_EQ(0, bit_cast(InputFloat32(index))); + break; + case Constant::kFloat64: + DCHECK_EQ(0, bit_cast(InputDouble(index))); + break; + default: + UNREACHABLE(); + } + return zero_reg; + } + return InputRegister(index); + } + + DoubleRegister InputOrZeroDoubleRegister(size_t index) { + if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; + + return InputDoubleRegister(index); + } + + DoubleRegister InputOrZeroSingleRegister(size_t index) { + if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; + + return InputSingleRegister(index); + } + + Operand InputImmediate(size_t index) { + Constant constant = ToConstant(instr_->InputAt(index)); + switch (constant.type()) { + case Constant::kInt32: + return Operand(constant.ToInt32()); + case Constant::kInt64: + return Operand(constant.ToInt64()); + case Constant::kFloat32: + return Operand::EmbeddedNumber(constant.ToFloat32()); + case Constant::kFloat64: + return Operand::EmbeddedNumber(constant.ToFloat64().value()); + case Constant::kExternalReference: + case Constant::kCompressedHeapObject: + case Constant::kHeapObject: + // TODO(plind): Maybe we should handle ExtRef & HeapObj here? + // maybe not done on arm due to const pool ?? 
+ break; + case Constant::kDelayedStringConstant: + return Operand::EmbeddedStringConstant( + constant.ToDelayedStringConstant()); + case Constant::kRpoNumber: + UNREACHABLE(); // TODO(titzer): RPO immediates + } + UNREACHABLE(); + } + + Operand InputOperand(size_t index) { + InstructionOperand* op = instr_->InputAt(index); + if (op->IsRegister()) { + return Operand(ToRegister(op)); + } + return InputImmediate(index); + } + + MemOperand MemoryOperand(size_t* first_index) { + const size_t index = *first_index; + switch (AddressingModeField::decode(instr_->opcode())) { + case kMode_None: + break; + case kMode_MRI: + *first_index += 2; + return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); + case kMode_Root: + return MemOperand(kRootRegister, InputInt32(index)); + case kMode_MRR: + // TODO(plind): r6 address mode, to be implemented ... + UNREACHABLE(); + } + UNREACHABLE(); + } + + MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); } + + MemOperand ToMemOperand(InstructionOperand* op) const { + DCHECK_NOT_NULL(op); + DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); + return SlotToMemOperand(AllocatedOperand::cast(op)->index()); + } + + MemOperand SlotToMemOperand(int slot) const { + FrameOffset offset = frame_access_state()->GetFrameOffset(slot); + return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset()); + } +}; + +static inline bool HasRegisterInput(Instruction* instr, size_t index) { + return instr->InputAt(index)->IsRegister(); +} +namespace { + +class OutOfLineRecordWrite final : public OutOfLineCode { + public: + OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index, + Register value, Register scratch0, Register scratch1, + RecordWriteMode mode, StubCallMode stub_mode) + : OutOfLineCode(gen), + object_(object), + index_(index), + value_(value), + scratch0_(scratch0), + scratch1_(scratch1), + mode_(mode), + stub_mode_(stub_mode), + must_save_lr_(!gen->frame_access_state()->has_frame()), + zone_(gen->zone()) { + DCHECK(!AreAliased(object, index, scratch0, scratch1)); + DCHECK(!AreAliased(value, index, scratch0, scratch1)); + } + + void Generate() final { + if (COMPRESS_POINTERS_BOOL) { + __ DecompressTaggedPointer(value_, value_); + } + __ CheckPageFlag(value_, scratch0_, + MemoryChunk::kPointersToHereAreInterestingMask, eq, + exit()); + __ Add64(scratch1_, object_, index_); + RememberedSetAction const remembered_set_action = + mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit + : RememberedSetAction::kOmit; + SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters() + ? SaveFPRegsMode::kSave + : SaveFPRegsMode::kIgnore; + if (must_save_lr_) { + // We need to save and restore ra if the frame was elided. + __ Push(ra); + } + if (mode_ == RecordWriteMode::kValueIsEphemeronKey) { + __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode); + } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) { + // A direct call to a wasm runtime stub defined in this module. + // Just encode the stub index. This will be patched when the code + // is added to the native module and copied into wasm code space. 
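+      // Recap of the slow path assembled above (sketch): the barrier has
+      // already branched to exit() when value_'s page does not have the
+      // "pointers to here are interesting" flag set, and the slot address
+      // has been computed into scratch1_ = object_ + index_; all that is
+      // left is choosing how the RecordWrite stub itself is reached.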
+ __ CallRecordWriteStubSaveRegisters(object_, scratch1_, + remembered_set_action, save_fp_mode, + StubCallMode::kCallWasmRuntimeStub); + } else { + __ CallRecordWriteStubSaveRegisters(object_, scratch1_, + remembered_set_action, save_fp_mode); + } + if (must_save_lr_) { + __ Pop(ra); + } + } + + private: + Register const object_; + Register const index_; + Register const value_; + Register const scratch0_; + Register const scratch1_; + RecordWriteMode const mode_; + StubCallMode const stub_mode_; + bool must_save_lr_; + Zone* zone_; +}; + +Condition FlagsConditionToConditionCmp(FlagsCondition condition) { + switch (condition) { + case kEqual: + return eq; + case kNotEqual: + return ne; + case kSignedLessThan: + return lt; + case kSignedGreaterThanOrEqual: + return ge; + case kSignedLessThanOrEqual: + return le; + case kSignedGreaterThan: + return gt; + case kUnsignedLessThan: + return Uless; + case kUnsignedGreaterThanOrEqual: + return Ugreater_equal; + case kUnsignedLessThanOrEqual: + return Uless_equal; + case kUnsignedGreaterThan: + return Ugreater; + case kUnorderedEqual: + case kUnorderedNotEqual: + break; + default: + break; + } + UNREACHABLE(); +} + +Condition FlagsConditionToConditionTst(FlagsCondition condition) { + switch (condition) { + case kNotEqual: + return ne; + case kEqual: + return eq; + default: + break; + } + UNREACHABLE(); +} + +Condition FlagsConditionToConditionOvf(FlagsCondition condition) { + switch (condition) { + case kOverflow: + return ne; + case kNotOverflow: + return eq; + default: + break; + } + UNREACHABLE(); +} + +FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, + FlagsCondition condition) { + switch (condition) { + case kEqual: + *predicate = true; + return EQ; + case kNotEqual: + *predicate = false; + return EQ; + case kUnsignedLessThan: + *predicate = true; + return LT; + case kUnsignedGreaterThanOrEqual: + *predicate = false; + return LT; + case kUnsignedLessThanOrEqual: + *predicate = true; + return LE; + case kUnsignedGreaterThan: + *predicate = false; + return LE; + case kUnorderedEqual: + case kUnorderedNotEqual: + *predicate = true; + break; + default: + *predicate = true; + break; + } + UNREACHABLE(); +} + +} // namespace + +#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ + do { \ + __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ + do { \ + __ sync(); \ + __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \ + do { \ + Label binop; \ + __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ sync(); \ + __ bind(&binop); \ + __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ + __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \ + Operand(i.InputRegister(2))); \ + __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&binop, ne, i.TempRegister(1), Operand(zero_reg)); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \ + size, bin_instr, representation) \ + do { \ + Label binop; \ + __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + if (representation == 32) { \ + __ And(i.TempRegister(3), i.TempRegister(0), 0x3); \ + } else { \ + DCHECK_EQ(representation, 64); \ + __ And(i.TempRegister(3), i.TempRegister(0), 0x7); \ + } \ + __ Sub64(i.TempRegister(0), 
i.TempRegister(0), \ + Operand(i.TempRegister(3))); \ + __ Sll32(i.TempRegister(3), i.TempRegister(3), 3); \ + __ sync(); \ + __ bind(&binop); \ + __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ + __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \ + size, sign_extend); \ + __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \ + Operand(i.InputRegister(2))); \ + __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \ + size); \ + __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&binop, ne, i.TempRegister(1), Operand(zero_reg)); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \ + do { \ + Label exchange; \ + __ sync(); \ + __ bind(&exchange); \ + __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ + __ Move(i.TempRegister(1), i.InputRegister(2)); \ + __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&exchange, ne, i.TempRegister(1), Operand(zero_reg)); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \ + load_linked, store_conditional, sign_extend, size, representation) \ + do { \ + Label exchange; \ + __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + if (representation == 32) { \ + __ And(i.TempRegister(1), i.TempRegister(0), 0x3); \ + } else { \ + DCHECK_EQ(representation, 64); \ + __ And(i.TempRegister(1), i.TempRegister(0), 0x7); \ + } \ + __ Sub64(i.TempRegister(0), i.TempRegister(0), \ + Operand(i.TempRegister(1))); \ + __ Sll32(i.TempRegister(1), i.TempRegister(1), 3); \ + __ sync(); \ + __ bind(&exchange); \ + __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ + __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ + size, sign_extend); \ + __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \ + size); \ + __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&exchange, ne, i.TempRegister(2), Operand(zero_reg)); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \ + store_conditional) \ + do { \ + Label compareExchange; \ + Label exit; \ + __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ sync(); \ + __ bind(&compareExchange); \ + __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&exit, ne, i.InputRegister(2), \ + Operand(i.OutputRegister(0))); \ + __ Move(i.TempRegister(2), i.InputRegister(3)); \ + __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&compareExchange, ne, i.TempRegister(2), \ + Operand(zero_reg)); \ + __ bind(&exit); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \ + load_linked, store_conditional, sign_extend, size, representation) \ + do { \ + Label compareExchange; \ + Label exit; \ + __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + if (representation == 32) { \ + __ And(i.TempRegister(1), i.TempRegister(0), 0x3); \ + } else { \ + DCHECK_EQ(representation, 64); \ + __ And(i.TempRegister(1), i.TempRegister(0), 0x7); \ + } \ + __ Sub64(i.TempRegister(0), i.TempRegister(0), \ + Operand(i.TempRegister(1))); \ + __ Sll32(i.TempRegister(1), i.TempRegister(1), 3); \ + __ sync(); \ + __ 
bind(&compareExchange); \ + __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ + __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ + size, sign_extend); \ + __ ExtractBits(i.InputRegister(2), i.InputRegister(2), 0, size, \ + sign_extend); \ + __ BranchShort(&exit, ne, i.InputRegister(2), \ + Operand(i.OutputRegister(0))); \ + __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \ + size); \ + __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ + __ BranchShort(&compareExchange, ne, i.TempRegister(2), \ + Operand(zero_reg)); \ + __ bind(&exit); \ + __ sync(); \ + } while (0) + +#define ASSEMBLE_IEEE754_BINOP(name) \ + do { \ + FrameScope scope(tasm(), StackFrame::MANUAL); \ + __ PrepareCallCFunction(0, 2, kScratchReg); \ + __ MovToFloatParameters(i.InputDoubleRegister(0), \ + i.InputDoubleRegister(1)); \ + __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ + /* Move the result in the double result register. */ \ + __ MovFromFloatResult(i.OutputDoubleRegister()); \ + } while (0) + +#define ASSEMBLE_IEEE754_UNOP(name) \ + do { \ + FrameScope scope(tasm(), StackFrame::MANUAL); \ + __ PrepareCallCFunction(0, 1, kScratchReg); \ + __ MovToFloatParameter(i.InputDoubleRegister(0)); \ + __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ + /* Move the result in the double result register. */ \ + __ MovFromFloatResult(i.OutputDoubleRegister()); \ + } while (0) + +#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \ + do { \ + __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \ + i.InputSimd128Register(1)); \ + } while (0) + +#define ASSEMBLE_RVV_BINOP_INTEGER(instr, OP) \ + case kRiscvI8x16##instr: { \ + __ VU.set(kScratchReg, E8, m1); \ + __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \ + i.InputSimd128Register(1)); \ + break; \ + } \ + case kRiscvI16x8##instr: { \ + __ VU.set(kScratchReg, E16, m1); \ + __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \ + i.InputSimd128Register(1)); \ + break; \ + } \ + case kRiscvI32x4##instr: { \ + __ VU.set(kScratchReg, E32, m1); \ + __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \ + i.InputSimd128Register(1)); \ + break; \ + } + +#define ASSEMBLE_RVV_UNOP_INTEGER_VR(instr, OP) \ + case kRiscvI8x16##instr: { \ + __ VU.set(kScratchReg, E8, m1); \ + __ OP(i.OutputSimd128Register(), i.InputRegister(0)); \ + break; \ + } \ + case kRiscvI16x8##instr: { \ + __ VU.set(kScratchReg, E16, m1); \ + __ OP(i.OutputSimd128Register(), i.InputRegister(0)); \ + break; \ + } \ + case kRiscvI32x4##instr: { \ + __ VU.set(kScratchReg, E32, m1); \ + __ OP(i.OutputSimd128Register(), i.InputRegister(0)); \ + break; \ + } \ + case kRiscvI64x2##instr: { \ + __ VU.set(kScratchReg, E64, m1); \ + __ OP(i.OutputSimd128Register(), i.InputRegister(0)); \ + break; \ + } + +#define ASSEMBLE_RVV_UNOP_INTEGER_VV(instr, OP) \ + case kRiscvI8x16##instr: { \ + __ VU.set(kScratchReg, E8, m1); \ + __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \ + break; \ + } \ + case kRiscvI16x8##instr: { \ + __ VU.set(kScratchReg, E16, m1); \ + __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \ + break; \ + } \ + case kRiscvI32x4##instr: { \ + __ VU.set(kScratchReg, E32, m1); \ + __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \ + break; \ + } \ + case kRiscvI64x2##instr: { \ + __ VU.set(kScratchReg, E64, m1); \ + __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \ + break; \ + } + +void 
CodeGenerator::AssembleDeconstructFrame() { + __ Move(sp, fp); + __ Pop(ra, fp); +} + +void CodeGenerator::AssemblePrepareTailCall() { + if (frame_access_state()->has_frame()) { + __ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); + __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + } + frame_access_state()->SetFrameAccessToSP(); +} + +void CodeGenerator::AssembleArchSelect(Instruction* instr, + FlagsCondition condition) { + UNIMPLEMENTED(); +} + +namespace { + +void AdjustStackPointerForTailCall(TurboAssembler* tasm, + FrameAccessState* state, + int new_slot_above_sp, + bool allow_shrinkage = true) { + int current_sp_offset = state->GetSPToFPSlotCount() + + StandardFrameConstants::kFixedSlotCountAboveFp; + int stack_slot_delta = new_slot_above_sp - current_sp_offset; + if (stack_slot_delta > 0) { + tasm->Sub64(sp, sp, stack_slot_delta * kSystemPointerSize); + state->IncreaseSPDelta(stack_slot_delta); + } else if (allow_shrinkage && stack_slot_delta < 0) { + tasm->Add64(sp, sp, -stack_slot_delta * kSystemPointerSize); + state->IncreaseSPDelta(stack_slot_delta); + } +} + +} // namespace + +void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, + int first_unused_slot_offset) { + AdjustStackPointerForTailCall(tasm(), frame_access_state(), + first_unused_slot_offset, false); +} + +void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, + int first_unused_slot_offset) { + AdjustStackPointerForTailCall(tasm(), frame_access_state(), + first_unused_slot_offset); +} + +// Check that {kJavaScriptCallCodeStartRegister} is correct. +void CodeGenerator::AssembleCodeStartRegisterCheck() { + __ ComputeCodeStartAddress(kScratchReg); + __ Assert(eq, AbortReason::kWrongFunctionCodeStart, + kJavaScriptCallCodeStartRegister, Operand(kScratchReg)); +} + +// Check if the code object is marked for deoptimization. If it is, then it +// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need +// to: +// 1. read from memory the word that contains that bit, which can be found in +// the flags in the referenced {CodeDataContainer} object; +// 2. test kMarkedForDeoptimizationBit in those flags; and +// 3. if it is not zero then it jumps to the builtin. +void CodeGenerator::BailoutIfDeoptimized() { + int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; + __ LoadTaggedPointerField( + kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset)); + __ Lw(kScratchReg, + FieldMemOperand(kScratchReg, + CodeDataContainer::kKindSpecificFlagsOffset)); + __ And(kScratchReg, kScratchReg, + Operand(1 << Code::kMarkedForDeoptimizationBit)); + __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), + RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg)); +} + +// Assembles an instruction after register allocation, producing machine code. 
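+// (Dispatch sketch: the ArchOpcode is decoded from the InstructionCode
+// bitfield with ArchOpcodeField::decode(); companion bitfields such as
+// AddressingModeField and MiscField carry the addressing mode and per-opcode
+// flags and are decoded on demand inside the individual cases.)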
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( + Instruction* instr) { + RiscvOperandConverter i(this, instr); + InstructionCode opcode = instr->opcode(); + ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode); + switch (arch_opcode) { + case kArchCallCodeObject: { + if (instr->InputAt(0)->IsImmediate()) { + __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); + } else { + Register reg = i.InputRegister(0); + DCHECK_IMPLIES( + instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), + reg == kJavaScriptCallCodeStartRegister); + __ CallCodeObject(reg); + } + RecordCallPosition(instr); + frame_access_state()->ClearSPDelta(); + break; + } + case kArchCallBuiltinPointer: { + DCHECK(!instr->InputAt(0)->IsImmediate()); + Register builtin_index = i.InputRegister(0); + __ CallBuiltinByIndex(builtin_index); + RecordCallPosition(instr); + frame_access_state()->ClearSPDelta(); + break; + } + case kArchCallWasmFunction: { + if (instr->InputAt(0)->IsImmediate()) { + Constant constant = i.ToConstant(instr->InputAt(0)); + Address wasm_code = static_cast
(constant.ToInt64()); + __ Call(wasm_code, constant.rmode()); + } else { + __ Add64(t6, i.InputOrZeroRegister(0), 0); + __ Call(t6); + } + RecordCallPosition(instr); + frame_access_state()->ClearSPDelta(); + break; + } + case kArchTailCallCodeObject: { + if (instr->InputAt(0)->IsImmediate()) { + __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); + } else { + Register reg = i.InputOrZeroRegister(0); + DCHECK_IMPLIES( + instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), + reg == kJavaScriptCallCodeStartRegister); + __ JumpCodeObject(reg); + } + frame_access_state()->ClearSPDelta(); + frame_access_state()->SetFrameAccessToDefault(); + break; + } + case kArchTailCallWasm: { + if (instr->InputAt(0)->IsImmediate()) { + Constant constant = i.ToConstant(instr->InputAt(0)); + Address wasm_code = static_cast
(constant.ToInt64()); + __ Jump(wasm_code, constant.rmode()); + } else { + __ Add64(kScratchReg, i.InputOrZeroRegister(0), 0); + __ Jump(kScratchReg); + } + frame_access_state()->ClearSPDelta(); + frame_access_state()->SetFrameAccessToDefault(); + break; + } + case kArchTailCallAddress: { + CHECK(!instr->InputAt(0)->IsImmediate()); + Register reg = i.InputOrZeroRegister(0); + DCHECK_IMPLIES( + instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), + reg == kJavaScriptCallCodeStartRegister); + __ Jump(reg); + frame_access_state()->ClearSPDelta(); + frame_access_state()->SetFrameAccessToDefault(); + break; + } + case kArchCallJSFunction: { + Register func = i.InputOrZeroRegister(0); + if (FLAG_debug_code) { + // Check the function's context matches the context argument. + __ LoadTaggedPointerField( + kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); + __ Assert(eq, AbortReason::kWrongFunctionContext, cp, + Operand(kScratchReg)); + } + static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); + __ LoadTaggedPointerField(a2, + FieldMemOperand(func, JSFunction::kCodeOffset)); + __ CallCodeObject(a2); + RecordCallPosition(instr); + frame_access_state()->ClearSPDelta(); + break; + } + case kArchPrepareCallCFunction: { + int const num_parameters = MiscField::decode(instr->opcode()); + __ PrepareCallCFunction(num_parameters, kScratchReg); + // Frame alignment requires using FP-relative frame addressing. + frame_access_state()->SetFrameAccessToFP(); + break; + } + case kArchSaveCallerRegisters: { + fp_mode_ = + static_cast(MiscField::decode(instr->opcode())); + DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore || + fp_mode_ == SaveFPRegsMode::kSave); + // kReturnRegister0 should have been saved before entering the stub. + int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0); + DCHECK(IsAligned(bytes, kSystemPointerSize)); + DCHECK_EQ(0, frame_access_state()->sp_delta()); + frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); + DCHECK(!caller_registers_saved_); + caller_registers_saved_ = true; + break; + } + case kArchRestoreCallerRegisters: { + DCHECK(fp_mode_ == + static_cast(MiscField::decode(instr->opcode()))); + DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore || + fp_mode_ == SaveFPRegsMode::kSave); + // Don't overwrite the returned value. + int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0); + frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize)); + DCHECK_EQ(0, frame_access_state()->sp_delta()); + DCHECK(caller_registers_saved_); + caller_registers_saved_ = false; + break; + } + case kArchPrepareTailCall: + AssemblePrepareTailCall(); + break; + case kArchCallCFunction: { + int const num_gp_parameters = ParamField::decode(instr->opcode()); + int const num_fp_parameters = FPParamField::decode(instr->opcode()); + Label after_call; + bool isWasmCapiFunction = + linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); + if (isWasmCapiFunction) { + // Put the return address in a stack slot. 
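+        // (Sketch of the intent: the address of the label bound right after
+        // the call is stored at kCallingPCOffset so the stack walker can
+        // recognize this exit frame while the C function is running; the
+        // RecordSafepoint() issued at that label pairs with the stored PC.)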
+ __ LoadAddress(kScratchReg, &after_call, RelocInfo::EXTERNAL_REFERENCE); + __ Sd(kScratchReg, + MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + } + if (instr->InputAt(0)->IsImmediate()) { + ExternalReference ref = i.InputExternalReference(0); + __ CallCFunction(ref, num_gp_parameters, num_fp_parameters); + } else { + Register func = i.InputOrZeroRegister(0); + __ CallCFunction(func, num_gp_parameters, num_fp_parameters); + } + __ bind(&after_call); + if (isWasmCapiFunction) { + RecordSafepoint(instr->reference_map()); + } + + frame_access_state()->SetFrameAccessToDefault(); + // Ideally, we should decrement SP delta to match the change of stack + // pointer in CallCFunction. However, for certain architectures (e.g. + // ARM), there may be more strict alignment requirement, causing old SP + // to be saved on the stack. In those cases, we can not calculate the SP + // delta statically. + frame_access_state()->ClearSPDelta(); + if (caller_registers_saved_) { + // Need to re-sync SP delta introduced in kArchSaveCallerRegisters. + // Here, we assume the sequence to be: + // kArchSaveCallerRegisters; + // kArchCallCFunction; + // kArchRestoreCallerRegisters; + int bytes = + __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0); + frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); + } + break; + } + case kArchJmp: + AssembleArchJump(i.InputRpo(0)); + break; + case kArchBinarySearchSwitch: + AssembleArchBinarySearchSwitch(instr); + break; + case kArchTableSwitch: + AssembleArchTableSwitch(instr); + break; + case kArchAbortCSADcheck: + DCHECK(i.InputRegister(0) == a0); + { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. + FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), + RelocInfo::CODE_TARGET); + } + __ stop(); + break; + case kArchDebugBreak: + __ DebugBreak(); + break; + case kArchComment: + __ RecordComment(reinterpret_cast(i.InputInt64(0))); + break; + case kArchNop: + case kArchThrowTerminator: + // don't emit code for nops. + break; + case kArchDeoptimize: { + DeoptimizationExit* exit = + BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore()); + __ Branch(exit->label()); + break; + } + case kArchRet: + AssembleReturn(instr->InputAt(0)); + break; + case kArchStackPointerGreaterThan: + // Pseudo-instruction used for cmp/branch. No opcode emitted here. 
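+      // (Mechanism sketch: the actual comparison of sp against the requested
+      // limit is emitted by the flags consumer; AssembleArchBranch and
+      // AssembleArchBoolean special-case kArchStackPointerGreaterThan later
+      // in this file, which is why no opcode is produced at this point.)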
+ break; + case kArchStackCheckOffset: + __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset())); + break; + case kArchFramePointer: + __ Move(i.OutputRegister(), fp); + break; + case kArchParentFramePointer: + if (frame_access_state()->has_frame()) { + __ Ld(i.OutputRegister(), MemOperand(fp, 0)); + } else { + __ Move(i.OutputRegister(), fp); + } + break; + case kArchTruncateDoubleToI: + __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), + i.InputDoubleRegister(0), DetermineStubCallMode()); + break; + case kArchStoreWithWriteBarrier: { + RecordWriteMode mode = + static_cast(MiscField::decode(instr->opcode())); + Register object = i.InputRegister(0); + Register index = i.InputRegister(1); + Register value = i.InputRegister(2); + Register scratch0 = i.TempRegister(0); + Register scratch1 = i.TempRegister(1); + auto ool = zone()->New(this, object, index, value, + scratch0, scratch1, mode, + DetermineStubCallMode()); + __ Add64(kScratchReg, object, index); + __ StoreTaggedField(value, MemOperand(kScratchReg)); + if (mode > RecordWriteMode::kValueIsPointer) { + __ JumpIfSmi(value, ool->exit()); + } + __ CheckPageFlag(object, scratch0, + MemoryChunk::kPointersFromHereAreInterestingMask, ne, + ool->entry()); + __ bind(ool->exit()); + break; + } + case kArchStackSlot: { + FrameOffset offset = + frame_access_state()->GetFrameOffset(i.InputInt32(0)); + Register base_reg = offset.from_stack_pointer() ? sp : fp; + __ Add64(i.OutputRegister(), base_reg, Operand(offset.offset())); + int alignment = i.InputInt32(1); + DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || + alignment == 16); + if (FLAG_debug_code && alignment > 0) { + // Verify that the output_register is properly aligned + __ And(kScratchReg, i.OutputRegister(), + Operand(kSystemPointerSize - 1)); + __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg, + Operand(zero_reg)); + } + if (alignment == 2 * kSystemPointerSize) { + Label done; + __ Add64(kScratchReg, base_reg, Operand(offset.offset())); + __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); + __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); + __ Add64(i.OutputRegister(), i.OutputRegister(), kSystemPointerSize); + __ bind(&done); + } else if (alignment > 2 * kSystemPointerSize) { + Label done; + __ Add64(kScratchReg, base_reg, Operand(offset.offset())); + __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); + __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); + __ li(kScratchReg2, alignment); + __ Sub64(kScratchReg2, kScratchReg2, Operand(kScratchReg)); + __ Add64(i.OutputRegister(), i.OutputRegister(), kScratchReg2); + __ bind(&done); + } + + break; + } + case kIeee754Float64Acos: + ASSEMBLE_IEEE754_UNOP(acos); + break; + case kIeee754Float64Acosh: + ASSEMBLE_IEEE754_UNOP(acosh); + break; + case kIeee754Float64Asin: + ASSEMBLE_IEEE754_UNOP(asin); + break; + case kIeee754Float64Asinh: + ASSEMBLE_IEEE754_UNOP(asinh); + break; + case kIeee754Float64Atan: + ASSEMBLE_IEEE754_UNOP(atan); + break; + case kIeee754Float64Atanh: + ASSEMBLE_IEEE754_UNOP(atanh); + break; + case kIeee754Float64Atan2: + ASSEMBLE_IEEE754_BINOP(atan2); + break; + case kIeee754Float64Cos: + ASSEMBLE_IEEE754_UNOP(cos); + break; + case kIeee754Float64Cosh: + ASSEMBLE_IEEE754_UNOP(cosh); + break; + case kIeee754Float64Cbrt: + ASSEMBLE_IEEE754_UNOP(cbrt); + break; + case kIeee754Float64Exp: + ASSEMBLE_IEEE754_UNOP(exp); + break; + case kIeee754Float64Expm1: + ASSEMBLE_IEEE754_UNOP(expm1); + break; + case kIeee754Float64Log: + 
ASSEMBLE_IEEE754_UNOP(log); + break; + case kIeee754Float64Log1p: + ASSEMBLE_IEEE754_UNOP(log1p); + break; + case kIeee754Float64Log2: + ASSEMBLE_IEEE754_UNOP(log2); + break; + case kIeee754Float64Log10: + ASSEMBLE_IEEE754_UNOP(log10); + break; + case kIeee754Float64Pow: + ASSEMBLE_IEEE754_BINOP(pow); + break; + case kIeee754Float64Sin: + ASSEMBLE_IEEE754_UNOP(sin); + break; + case kIeee754Float64Sinh: + ASSEMBLE_IEEE754_UNOP(sinh); + break; + case kIeee754Float64Tan: + ASSEMBLE_IEEE754_UNOP(tan); + break; + case kIeee754Float64Tanh: + ASSEMBLE_IEEE754_UNOP(tanh); + break; + case kRiscvAdd32: + __ Add32(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + break; + case kRiscvAdd64: + __ Add64(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + break; + case kRiscvAddOvf64: + __ AddOverflow64(i.OutputRegister(), i.InputOrZeroRegister(0), + i.InputOperand(1), kScratchReg); + break; + case kRiscvSub32: + __ Sub32(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + break; + case kRiscvSub64: + __ Sub64(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + break; + case kRiscvSubOvf64: + __ SubOverflow64(i.OutputRegister(), i.InputOrZeroRegister(0), + i.InputOperand(1), kScratchReg); + break; + case kRiscvMul32: + __ Mul32(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + break; + case kRiscvMulOvf32: + __ MulOverflow32(i.OutputRegister(), i.InputOrZeroRegister(0), + i.InputOperand(1), kScratchReg); + break; + case kRiscvMulHigh32: + __ Mulh32(i.OutputRegister(), i.InputOrZeroRegister(0), + i.InputOperand(1)); + break; + case kRiscvMulHighU32: + __ Mulhu32(i.OutputRegister(), i.InputOrZeroRegister(0), + i.InputOperand(1), kScratchReg, kScratchReg2); + break; + case kRiscvMulHigh64: + __ Mulh64(i.OutputRegister(), i.InputOrZeroRegister(0), + i.InputOperand(1)); + break; + case kRiscvDiv32: { + __ Div32(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + // Set ouput to zero if divisor == 0 + __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1)); + break; + } + case kRiscvDivU32: { + __ Divu32(i.OutputRegister(), i.InputOrZeroRegister(0), + i.InputOperand(1)); + // Set ouput to zero if divisor == 0 + __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1)); + break; + } + case kRiscvMod32: + __ Mod32(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + break; + case kRiscvModU32: + __ Modu32(i.OutputRegister(), i.InputOrZeroRegister(0), + i.InputOperand(1)); + break; + case kRiscvMul64: + __ Mul64(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + break; + case kRiscvDiv64: { + __ Div64(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + // Set ouput to zero if divisor == 0 + __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1)); + break; + } + case kRiscvDivU64: { + __ Divu64(i.OutputRegister(), i.InputOrZeroRegister(0), + i.InputOperand(1)); + // Set ouput to zero if divisor == 0 + __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1)); + break; + } + case kRiscvMod64: + __ Mod64(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + break; + case kRiscvModU64: + __ Modu64(i.OutputRegister(), i.InputOrZeroRegister(0), + i.InputOperand(1)); + break; + case kRiscvAnd: + __ And(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + break; + case kRiscvAnd32: + __ And(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + __ Sll32(i.OutputRegister(), i.OutputRegister(), 
0x0); + break; + case kRiscvOr: + __ Or(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + break; + case kRiscvOr32: + __ Or(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0); + break; + case kRiscvNor: + if (instr->InputAt(1)->IsRegister()) { + __ Nor(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + } else { + DCHECK_EQ(0, i.InputOperand(1).immediate()); + __ Nor(i.OutputRegister(), i.InputOrZeroRegister(0), zero_reg); + } + break; + case kRiscvNor32: + if (instr->InputAt(1)->IsRegister()) { + __ Nor(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0); + } else { + DCHECK_EQ(0, i.InputOperand(1).immediate()); + __ Nor(i.OutputRegister(), i.InputOrZeroRegister(0), zero_reg); + __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0); + } + break; + case kRiscvXor: + __ Xor(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + break; + case kRiscvXor32: + __ Xor(i.OutputRegister(), i.InputOrZeroRegister(0), i.InputOperand(1)); + __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0); + break; + case kRiscvClz32: + __ Clz32(i.OutputRegister(), i.InputOrZeroRegister(0)); + break; + case kRiscvClz64: + __ Clz64(i.OutputRegister(), i.InputOrZeroRegister(0)); + break; + case kRiscvCtz32: { + Register src = i.InputRegister(0); + Register dst = i.OutputRegister(); + __ Ctz32(dst, src); + } break; + case kRiscvCtz64: { + Register src = i.InputRegister(0); + Register dst = i.OutputRegister(); + __ Ctz64(dst, src); + } break; + case kRiscvPopcnt32: { + Register src = i.InputRegister(0); + Register dst = i.OutputRegister(); + __ Popcnt32(dst, src, kScratchReg); + } break; + case kRiscvPopcnt64: { + Register src = i.InputRegister(0); + Register dst = i.OutputRegister(); + __ Popcnt64(dst, src, kScratchReg); + } break; + case kRiscvShl32: + if (instr->InputAt(1)->IsRegister()) { + __ Sll32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + } else { + int64_t imm = i.InputOperand(1).immediate(); + __ Sll32(i.OutputRegister(), i.InputRegister(0), + static_cast(imm)); + } + break; + case kRiscvShr32: + if (instr->InputAt(1)->IsRegister()) { + __ Srl32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + } else { + int64_t imm = i.InputOperand(1).immediate(); + __ Srl32(i.OutputRegister(), i.InputRegister(0), + static_cast(imm)); + } + break; + case kRiscvSar32: + if (instr->InputAt(1)->IsRegister()) { + __ Sra32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); + } else { + int64_t imm = i.InputOperand(1).immediate(); + __ Sra32(i.OutputRegister(), i.InputRegister(0), + static_cast(imm)); + } + break; + case kRiscvZeroExtendWord: { + __ ZeroExtendWord(i.OutputRegister(), i.InputRegister(0)); + break; + } + case kRiscvSignExtendWord: { + __ SignExtendWord(i.OutputRegister(), i.InputRegister(0)); + break; + } + case kRiscvShl64: + __ Sll64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kRiscvShr64: + __ Srl64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kRiscvSar64: + __ Sra64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kRiscvRor32: + __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kRiscvRor64: + __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); + break; + case kRiscvTst: + __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1)); + // 
Pseudo-instruction used for cmp/branch. No opcode emitted here. + break; + case kRiscvCmp: + // Pseudo-instruction used for cmp/branch. No opcode emitted here. + break; + case kRiscvCmpZero: + // Pseudo-instruction used for cmpzero/branch. No opcode emitted here. + break; + case kRiscvMov: + // TODO(plind): Should we combine mov/li like this, or use separate instr? + // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType + if (HasRegisterInput(instr, 0)) { + __ Move(i.OutputRegister(), i.InputRegister(0)); + } else { + __ li(i.OutputRegister(), i.InputOperand(0)); + } + break; + + case kRiscvCmpS: { + FPURegister left = i.InputOrZeroSingleRegister(0); + FPURegister right = i.InputOrZeroSingleRegister(1); + bool predicate; + FPUCondition cc = + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); + + if ((left == kDoubleRegZero || right == kDoubleRegZero) && + !__ IsSingleZeroRegSet()) { + __ LoadFPRImmediate(kDoubleRegZero, 0.0f); + } + // compare result set to kScratchReg + __ CompareF32(kScratchReg, cc, left, right); + } break; + case kRiscvAddS: + // TODO(plind): add special case: combine mult & add. + __ fadd_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvSubS: + __ fsub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvMulS: + // TODO(plind): add special case: right op is -1.0, see arm port. + __ fmul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvDivS: + __ fdiv_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvModS: { + // TODO(bmeurer): We should really get rid of this special instruction, + // and generate a CallAddress instruction instead. + FrameScope scope(tasm(), StackFrame::MANUAL); + __ PrepareCallCFunction(0, 2, kScratchReg); + __ MovToFloatParameters(i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate()) + __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); + // Move the result in the double result register. + __ MovFromFloatResult(i.OutputSingleRegister()); + break; + } + case kRiscvAbsS: + __ fabs_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); + break; + case kRiscvNegS: + __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); + break; + case kRiscvSqrtS: { + __ fsqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + } + case kRiscvMaxS: + __ fmax_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvMinS: + __ fmin_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvCmpD: { + FPURegister left = i.InputOrZeroDoubleRegister(0); + FPURegister right = i.InputOrZeroDoubleRegister(1); + bool predicate; + FPUCondition cc = + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); + if ((left == kDoubleRegZero || right == kDoubleRegZero) && + !__ IsDoubleZeroRegSet()) { + __ LoadFPRImmediate(kDoubleRegZero, 0.0); + } + // compare result set to kScratchReg + __ CompareF64(kScratchReg, cc, left, right); + } break; + case kRiscvAddD: + // TODO(plind): add special case: combine mult & add. 
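+      // (Note on the TODO: the RISC-V D extension provides fused
+      // fmadd.d / fmsub.d, which a combined multiply-add pattern could
+      // target.)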
+ __ fadd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvSubD: + __ fsub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvMulD: + // TODO(plind): add special case: right op is -1.0, see arm port. + __ fmul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvDivD: + __ fdiv_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvModD: { + // TODO(bmeurer): We should really get rid of this special instruction, + // and generate a CallAddress instruction instead. + FrameScope scope(tasm(), StackFrame::MANUAL); + __ PrepareCallCFunction(0, 2, kScratchReg); + __ MovToFloatParameters(i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); + // Move the result in the double result register. + __ MovFromFloatResult(i.OutputDoubleRegister()); + break; + } + case kRiscvAbsD: + __ fabs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + case kRiscvNegD: + __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + case kRiscvSqrtD: { + __ fsqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + } + case kRiscvMaxD: + __ fmax_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvMinD: + __ fmin_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputDoubleRegister(1)); + break; + case kRiscvFloat64RoundDown: { + __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + kScratchDoubleReg); + break; + } + case kRiscvFloat32RoundDown: { + __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0), + kScratchDoubleReg); + break; + } + case kRiscvFloat64RoundTruncate: { + __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + kScratchDoubleReg); + break; + } + case kRiscvFloat32RoundTruncate: { + __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0), + kScratchDoubleReg); + break; + } + case kRiscvFloat64RoundUp: { + __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + kScratchDoubleReg); + break; + } + case kRiscvFloat32RoundUp: { + __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0), + kScratchDoubleReg); + break; + } + case kRiscvFloat64RoundTiesEven: { + __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + kScratchDoubleReg); + break; + } + case kRiscvFloat32RoundTiesEven: { + __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0), + kScratchDoubleReg); + break; + } + case kRiscvFloat32Max: { + __ Float32Max(i.OutputSingleRegister(), i.InputSingleRegister(0), + i.InputSingleRegister(1)); + break; + } + case kRiscvFloat64Max: { + __ Float64Max(i.OutputSingleRegister(), i.InputSingleRegister(0), + i.InputSingleRegister(1)); + break; + } + case kRiscvFloat32Min: { + __ Float32Min(i.OutputSingleRegister(), i.InputSingleRegister(0), + i.InputSingleRegister(1)); + break; + } + case kRiscvFloat64Min: { + __ Float64Min(i.OutputSingleRegister(), i.InputSingleRegister(0), + i.InputSingleRegister(1)); + break; + } + case kRiscvFloat64SilenceNaN: + __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + break; + case kRiscvCvtSD: + __ fcvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); + break; + case kRiscvCvtDS: + __ fcvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); + break; + case kRiscvCvtDW: { + __ 
fcvt_d_w(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + } + case kRiscvCvtSW: { + __ fcvt_s_w(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + } + case kRiscvCvtSUw: { + __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + } + case kRiscvCvtSL: { + __ fcvt_s_l(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + } + case kRiscvCvtDL: { + __ fcvt_d_l(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + } + case kRiscvCvtDUw: { + __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + } + case kRiscvCvtDUl: { + __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + } + case kRiscvCvtSUl: { + __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + } + case kRiscvFloorWD: { + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + __ Floor_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result); + break; + } + case kRiscvCeilWD: { + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + __ Ceil_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result); + break; + } + case kRiscvRoundWD: { + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + __ Round_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result); + break; + } + case kRiscvTruncWD: { + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + __ Trunc_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result); + break; + } + case kRiscvFloorWS: { + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + __ Floor_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result); + break; + } + case kRiscvCeilWS: { + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + __ Ceil_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result); + break; + } + case kRiscvRoundWS: { + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + __ Round_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result); + break; + } + case kRiscvTruncWS: { + Label done; + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode()); + __ Trunc_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result); + + // On RISCV, if the input value exceeds INT32_MAX, the result of fcvt + // is INT32_MAX. Note that, since INT32_MAX means the lower 31-bits are + // all 1s, INT32_MAX cannot be represented precisely as a float, so an + // fcvt result of INT32_MAX always indicate overflow. + // + // In wasm_compiler, to detect overflow in converting a FP value, fval, to + // integer, V8 checks whether I2F(F2I(fval)) equals fval. However, if fval + // == INT32_MAX+1, the value of I2F(F2I(fval)) happens to be fval. So, + // INT32_MAX is not a good value to indicate overflow. Instead, we will + // use INT32_MIN as the converted result of an out-of-range FP value, + // exploiting the fact that INT32_MAX+1 is INT32_MIN. + // + // If the result of conversion overflow, the result will be set to + // INT32_MIN. Here we detect overflow by testing whether output + 1 < + // output (i.e., kScratchReg < output) + if (set_overflow_to_min_i32) { + __ Add32(kScratchReg, i.OutputRegister(), 1); + __ BranchShort(&done, lt, i.OutputRegister(), Operand(kScratchReg)); + __ Move(i.OutputRegister(), kScratchReg); + __ bind(&done); + } + break; + } + case kRiscvTruncLS: { + Register result = instr->OutputCount() > 1 ? 
i.OutputRegister(1) : no_reg; + __ Trunc_l_s(i.OutputRegister(), i.InputDoubleRegister(0), result); + break; + } + case kRiscvTruncLD: { + Label done; + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + bool set_overflow_to_min_i64 = MiscField::decode(instr->opcode()); + __ Trunc_l_d(i.OutputRegister(), i.InputDoubleRegister(0), result); + if (set_overflow_to_min_i64) { + __ Add64(kScratchReg, i.OutputRegister(), 1); + __ BranchShort(&done, lt, i.OutputRegister(), Operand(kScratchReg)); + __ Move(i.OutputRegister(), kScratchReg); + __ bind(&done); + } + break; + } + case kRiscvTruncUwD: { + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), result); + break; + } + case kRiscvTruncUwS: { + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + bool set_overflow_to_min_u32 = MiscField::decode(instr->opcode()); + __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), result); + + // On RISCV, if the input value exceeds UINT32_MAX, the result of fcvt + // is UINT32_MAX. Note that, since UINT32_MAX means all 32-bits are 1s, + // UINT32_MAX cannot be represented precisely as float, so an fcvt result + // of UINT32_MAX always indicates overflow. + // + // In wasm_compiler.cc, to detect overflow in converting a FP value, fval, + // to integer, V8 checks whether I2F(F2I(fval)) equals fval. However, if + // fval == UINT32_MAX+1, the value of I2F(F2I(fval)) happens to be fval. + // So, UINT32_MAX is not a good value to indicate overflow. Instead, we + // will use 0 as the converted result of an out-of-range FP value, + // exploiting the fact that UINT32_MAX+1 is 0. + if (set_overflow_to_min_u32) { + __ Add32(kScratchReg, i.OutputRegister(), 1); + // Set ouput to zero if result overflows (i.e., UINT32_MAX) + __ LoadZeroIfConditionZero(i.OutputRegister(), kScratchReg); + } + break; + } + case kRiscvTruncUlS: { + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + __ Trunc_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), result); + break; + } + case kRiscvTruncUlD: { + Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; + __ Trunc_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), result); + break; + } + case kRiscvBitcastDL: + __ fmv_x_d(i.OutputRegister(), i.InputDoubleRegister(0)); + break; + case kRiscvBitcastLD: + __ fmv_d_x(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + case kRiscvBitcastInt32ToFloat32: + __ fmv_w_x(i.OutputDoubleRegister(), i.InputRegister(0)); + break; + case kRiscvBitcastFloat32ToInt32: + __ fmv_x_w(i.OutputRegister(), i.InputDoubleRegister(0)); + break; + case kRiscvFloat64ExtractLowWord32: + __ ExtractLowWordFromF64(i.OutputRegister(), i.InputDoubleRegister(0)); + break; + case kRiscvFloat64ExtractHighWord32: + __ ExtractHighWordFromF64(i.OutputRegister(), i.InputDoubleRegister(0)); + break; + case kRiscvFloat64InsertLowWord32: + __ InsertLowWordF64(i.OutputDoubleRegister(), i.InputRegister(1)); + break; + case kRiscvFloat64InsertHighWord32: + __ InsertHighWordF64(i.OutputDoubleRegister(), i.InputRegister(1)); + break; + // ... more basic instructions ... 
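+      // Worked example for the kRiscvTruncWS / kRiscvTruncUwS overflow
+      // handling above (exact values): Trunc_w_s(3.0e9f) saturates to
+      // INT32_MAX = 2147483647; kScratchReg = 2147483647 + 1 wraps to
+      // INT32_MIN = -2147483648; "output < kScratchReg" is then false, so
+      // the output is replaced by INT32_MIN, the out-of-range marker chosen
+      // above. In the unsigned case UINT32_MAX + 1 wraps to 0 and
+      // LoadZeroIfConditionZero clears the result instead.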
+ + case kRiscvSignExtendByte: + __ SignExtendByte(i.OutputRegister(), i.InputRegister(0)); + break; + case kRiscvSignExtendShort: + __ SignExtendShort(i.OutputRegister(), i.InputRegister(0)); + break; + case kRiscvLbu: + __ Lbu(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvLb: + __ Lb(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvSb: + __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand()); + break; + case kRiscvLhu: + __ Lhu(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvUlhu: + __ Ulhu(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvLh: + __ Lh(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvUlh: + __ Ulh(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvSh: + __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand()); + break; + case kRiscvUsh: + __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand()); + break; + case kRiscvLw: + __ Lw(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvUlw: + __ Ulw(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvLwu: + __ Lwu(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvUlwu: + __ Ulwu(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvLd: + __ Ld(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvUld: + __ Uld(i.OutputRegister(), i.MemoryOperand()); + break; + case kRiscvSw: + __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand()); + break; + case kRiscvUsw: + __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand()); + break; + case kRiscvSd: + __ Sd(i.InputOrZeroRegister(2), i.MemoryOperand()); + break; + case kRiscvUsd: + __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand()); + break; + case kRiscvLoadFloat: { + __ LoadFloat(i.OutputSingleRegister(), i.MemoryOperand()); + break; + } + case kRiscvULoadFloat: { + __ ULoadFloat(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg); + break; + } + case kRiscvStoreFloat: { + size_t index = 0; + MemOperand operand = i.MemoryOperand(&index); + FPURegister ft = i.InputOrZeroSingleRegister(index); + if (ft == kDoubleRegZero && !__ IsSingleZeroRegSet()) { + __ LoadFPRImmediate(kDoubleRegZero, 0.0f); + } + __ StoreFloat(ft, operand); + break; + } + case kRiscvUStoreFloat: { + size_t index = 0; + MemOperand operand = i.MemoryOperand(&index); + FPURegister ft = i.InputOrZeroSingleRegister(index); + if (ft == kDoubleRegZero && !__ IsSingleZeroRegSet()) { + __ LoadFPRImmediate(kDoubleRegZero, 0.0f); + } + __ UStoreFloat(ft, operand, kScratchReg); + break; + } + case kRiscvLoadDouble: + __ LoadDouble(i.OutputDoubleRegister(), i.MemoryOperand()); + break; + case kRiscvULoadDouble: + __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg); + break; + case kRiscvStoreDouble: { + FPURegister ft = i.InputOrZeroDoubleRegister(2); + if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { + __ LoadFPRImmediate(kDoubleRegZero, 0.0); + } + __ StoreDouble(ft, i.MemoryOperand()); + break; + } + case kRiscvUStoreDouble: { + FPURegister ft = i.InputOrZeroDoubleRegister(2); + if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { + __ LoadFPRImmediate(kDoubleRegZero, 0.0); + } + __ UStoreDouble(ft, i.MemoryOperand(), kScratchReg); + break; + } + case kRiscvSync: { + __ sync(); + break; + } + case kRiscvPush: + if (instr->InputAt(0)->IsFPRegister()) { + __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize)); + __ Sub32(sp, sp, Operand(kDoubleSize)); + frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize); + } else { + __ Push(i.InputOrZeroRegister(0)); 
+ frame_access_state()->IncreaseSPDelta(1); + } + break; + case kRiscvPeek: { + int reverse_slot = i.InputInt32(0); + int offset = + FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot); + if (instr->OutputAt(0)->IsFPRegister()) { + LocationOperand* op = LocationOperand::cast(instr->OutputAt(0)); + if (op->representation() == MachineRepresentation::kFloat64) { + __ LoadDouble(i.OutputDoubleRegister(), MemOperand(fp, offset)); + } else { + DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32); + __ LoadFloat( + i.OutputSingleRegister(0), + MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset)); + } + } else { + __ Ld(i.OutputRegister(0), MemOperand(fp, offset)); + } + break; + } + case kRiscvStackClaim: { + __ Sub64(sp, sp, Operand(i.InputInt32(0))); + frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / + kSystemPointerSize); + break; + } + case kRiscvStoreToStackSlot: { + if (instr->InputAt(0)->IsFPRegister()) { + if (instr->InputAt(0)->IsSimd128Register()) { + Register dst = sp; + if (i.InputInt32(1) != 0) { + dst = kScratchReg2; + __ Add64(kScratchReg2, sp, Operand(i.InputInt32(1))); + } + __ VU.set(kScratchReg, E8, m1); + __ vs(i.InputSimd128Register(0), dst, 0, E8); + } else { + __ StoreDouble(i.InputDoubleRegister(0), + MemOperand(sp, i.InputInt32(1))); + } + } else { + __ Sd(i.InputOrZeroRegister(0), MemOperand(sp, i.InputInt32(1))); + } + break; + } + case kRiscvByteSwap64: { + __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8, kScratchReg); + break; + } + case kRiscvByteSwap32: { + __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4, kScratchReg); + break; + } + case kAtomicLoadInt8: + DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); + ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb); + break; + case kAtomicLoadUint8: + ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu); + break; + case kAtomicLoadInt16: + DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); + ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh); + break; + case kAtomicLoadUint16: + ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu); + break; + case kAtomicLoadWord32: + ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw); + break; + case kRiscvWord64AtomicLoadUint64: + ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld); + break; + case kAtomicStoreWord8: + ASSEMBLE_ATOMIC_STORE_INTEGER(Sb); + break; + case kAtomicStoreWord16: + ASSEMBLE_ATOMIC_STORE_INTEGER(Sh); + break; + case kAtomicStoreWord32: + ASSEMBLE_ATOMIC_STORE_INTEGER(Sw); + break; + case kRiscvWord64AtomicStoreWord64: + ASSEMBLE_ATOMIC_STORE_INTEGER(Sd); + break; + case kAtomicExchangeInt8: + DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32); + break; + case kAtomicExchangeUint8: + switch (AtomicWidthField::decode(opcode)) { + case AtomicWidth::kWord32: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32); + break; + case AtomicWidth::kWord64: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64); + break; + } + break; + case kAtomicExchangeInt16: + DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32); + break; + case kAtomicExchangeUint16: + switch (AtomicWidthField::decode(opcode)) { + case AtomicWidth::kWord32: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32); + break; + case AtomicWidth::kWord64: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64); + break; + } + break; + case kAtomicExchangeWord32: + switch (AtomicWidthField::decode(opcode)) { + case AtomicWidth::kWord32: + 
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+          break;
+        case AtomicWidth::kWord64:
+          ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+          break;
+      }
+      break;
+    case kRiscvWord64AtomicExchangeUint64:
+      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
+      break;
+    case kAtomicCompareExchangeInt8:
+      DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
+      break;
+    case kAtomicCompareExchangeUint8:
+      switch (AtomicWidthField::decode(opcode)) {
+        case AtomicWidth::kWord32:
+          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+          break;
+        case AtomicWidth::kWord64:
+          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+          break;
+      }
+      break;
+    case kAtomicCompareExchangeInt16:
+      DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
+      break;
+    case kAtomicCompareExchangeUint16:
+      switch (AtomicWidthField::decode(opcode)) {
+        case AtomicWidth::kWord32:
+          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+          break;
+        case AtomicWidth::kWord64:
+          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+          break;
+      }
+      break;
+    case kAtomicCompareExchangeWord32:
+      switch (AtomicWidthField::decode(opcode)) {
+        case AtomicWidth::kWord32:
+          __ Sll32(i.InputRegister(2), i.InputRegister(2), 0);
+          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+          break;
+        case AtomicWidth::kWord64:
+          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+          break;
+      }
+      break;
+    case kRiscvWord64AtomicCompareExchangeUint64:
+      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
+      break;
+#define ATOMIC_BINOP_CASE(op, inst32, inst64)                           \
+  case kAtomic##op##Int8:                                               \
+    DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);  \
+    ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32);             \
+    break;                                                              \
+  case kAtomic##op##Uint8:                                              \
+    switch (AtomicWidthField::decode(opcode)) {                         \
+      case AtomicWidth::kWord32:                                        \
+        ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32);        \
+        break;                                                          \
+      case AtomicWidth::kWord64:                                        \
+        ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64);      \
+        break;                                                          \
+    }                                                                   \
+    break;                                                              \
+  case kAtomic##op##Int16:                                              \
+    DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);  \
+    ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32);            \
+    break;                                                              \
+  case kAtomic##op##Uint16:                                             \
+    switch (AtomicWidthField::decode(opcode)) {                         \
+      case AtomicWidth::kWord32:                                        \
+        ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32);       \
+        break;                                                          \
+      case AtomicWidth::kWord64:                                        \
+        ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64);     \
+        break;                                                          \
+    }                                                                   \
+    break;                                                              \
+  case kAtomic##op##Word32:                                             \
+    switch (AtomicWidthField::decode(opcode)) {                         \
+      case AtomicWidth::kWord32:                                        \
+        ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32);                          \
+        break;                                                          \
+      case AtomicWidth::kWord64:                                        \
+        ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64);     \
+        break;                                                          \
+    }                                                                   \
+    break;                                                              \
+  case kRiscvWord64Atomic##op##Uint64:                                  \
+    ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64);                            \
+    break;
+    ATOMIC_BINOP_CASE(Add, Add32, Add64)
+    ATOMIC_BINOP_CASE(Sub, Sub32, Sub64)
+    ATOMIC_BINOP_CASE(And, And, And)
+    ATOMIC_BINOP_CASE(Or, Or, Or)
+    ATOMIC_BINOP_CASE(Xor, Xor, Xor)
+#undef ATOMIC_BINOP_CASE
+    case kRiscvAssertEqual:
+      __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
+                i.InputRegister(0), Operand(i.InputRegister(1)));
+      break;
+    case kRiscvStoreCompressTagged: {
+      size_t index = 0;
+      MemOperand operand = i.MemoryOperand(&index);
+      __ StoreTaggedField(i.InputOrZeroRegister(index), operand);
+      break;
+    }
+    case kRiscvLoadDecompressTaggedSigned: {
+      CHECK(instr->HasOutput());
+      Register result = i.OutputRegister();
+      MemOperand operand = i.MemoryOperand();
+      __ DecompressTaggedSigned(result, operand);
+      break;
+    }
+    case kRiscvLoadDecompressTaggedPointer: {
+      CHECK(instr->HasOutput());
+      Register result = i.OutputRegister();
+      MemOperand operand = i.MemoryOperand();
+      __ DecompressTaggedPointer(result, operand);
+      break;
+    }
+    case kRiscvLoadDecompressAnyTagged: {
+      CHECK(instr->HasOutput());
+      Register result = i.OutputRegister();
+      MemOperand operand = i.MemoryOperand();
+      __ DecompressAnyTagged(result, operand);
+      break;
+    }
+    case kRiscvRvvSt: {
+      (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+      Register dst = i.MemoryOperand().offset() == 0 ? i.MemoryOperand().rm()
+                                                     : kScratchReg;
+      if (i.MemoryOperand().offset() != 0) {
+        __ Add64(dst, i.MemoryOperand().rm(), i.MemoryOperand().offset());
+      }
+      __ vs(i.InputSimd128Register(2), dst, 0, VSew::E8);
+      break;
+    }
+    case kRiscvRvvLd: {
+      (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+      Register src = i.MemoryOperand().offset() == 0 ? i.MemoryOperand().rm()
+                                                     : kScratchReg;
+      if (i.MemoryOperand().offset() != 0) {
+        __ Add64(src, i.MemoryOperand().rm(), i.MemoryOperand().offset());
+      }
+      __ vl(i.OutputSimd128Register(), src, 0, VSew::E8);
+      break;
+    }
+    case kRiscvS128Zero: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ VU.set(kScratchReg, E8, m1);
+      __ vmv_vx(dst, zero_reg);
+      break;
+    }
+    case kRiscvS128Load32Zero: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ VU.set(kScratchReg, E32, m1);
+      __ Lwu(kScratchReg, i.MemoryOperand());
+      __ vmv_sx(dst, kScratchReg);
+      break;
+    }
+    case kRiscvS128Load64Zero: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ VU.set(kScratchReg, E64, m1);
+      __ Ld(kScratchReg, i.MemoryOperand());
+      __ vmv_sx(dst, kScratchReg);
+      break;
+    }
+    case kRiscvS128LoadLane: {
+      Simd128Register dst = i.OutputSimd128Register();
+      DCHECK_EQ(dst, i.InputSimd128Register(0));
+      auto sz = static_cast<int>(MiscField::decode(instr->opcode()));
+      __ LoadLane(sz, dst, i.InputUint8(1), i.MemoryOperand(2));
+      break;
+    }
+    case kRiscvS128StoreLane: {
+      Simd128Register src = i.InputSimd128Register(0);
+      DCHECK_EQ(src, i.InputSimd128Register(0));
+      auto sz = static_cast<int>(MiscField::decode(instr->opcode()));
+      __ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2));
+      break;
+    }
+    case kRiscvS128Load64ExtendS: {
+      __ VU.set(kScratchReg, E64, m1);
+      __ Ld(kScratchReg, i.MemoryOperand());
+      __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+      __ VU.set(kScratchReg, i.InputInt8(2), m1);
+      __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+      break;
+    }
+    case kRiscvS128Load64ExtendU: {
+      __ VU.set(kScratchReg, E64, m1);
+      __ Ld(kScratchReg, i.MemoryOperand());
+      __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+      __ VU.set(kScratchReg, i.InputInt8(2), m1);
+      __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+      break;
+    }
+    case kRiscvS128LoadSplat: {
+      __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+      switch (i.InputInt8(2)) {
+        case E8:
+          __ Lb(kScratchReg, i.MemoryOperand());
+          break;
+        case E16:
+          __ Lh(kScratchReg, i.MemoryOperand());
+          break;
+        case E32:
+          __ Lw(kScratchReg, i.MemoryOperand());
+          break;
+        case E64:
+          __ Ld(kScratchReg, i.MemoryOperand());
+          break;
+        default:
+          UNREACHABLE();
+      }
+      __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
+      break;
+    }
+    case kRiscvS128AllOnes: {
+      __ VU.set(kScratchReg, E8, m1);
+      __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+      __ vnot_vv(i.OutputSimd128Register(), i.OutputSimd128Register());
+      break;
+    }
+    case kRiscvS128Select: {
+      __ VU.set(kScratchReg, E8, m1);
+      __ vand_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+                 i.InputSimd128Register(0));
+      __ vnot_vv(kSimd128ScratchReg2, i.InputSimd128Register(0));
+      __ vand_vv(kSimd128ScratchReg2, i.InputSimd128Register(2),
+                 kSimd128ScratchReg2);
+      __ vor_vv(i.OutputSimd128Register(), kSimd128ScratchReg,
+                kSimd128ScratchReg2);
+      break;
+    }
+    case kRiscvS128And: {
+      (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+      __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                 i.InputSimd128Register(1));
+      break;
+    }
+    case kRiscvS128Or: {
+      (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+      __ vor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                i.InputSimd128Register(1));
+      break;
+    }
+    case kRiscvS128Xor: {
+      (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+      __ vxor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                 i.InputSimd128Register(1));
+      break;
+    }
+    case kRiscvS128Not: {
+      (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+      __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kRiscvS128AndNot: {
+      (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+      __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                 i.OutputSimd128Register());
+      break;
+    }
+    case kRiscvS128Const: {
+      Simd128Register dst = i.OutputSimd128Register();
+      uint8_t imm[16];
+      *reinterpret_cast<uint64_t*>(imm) =
+          make_uint64(i.InputUint32(1), i.InputUint32(0));
+      *(reinterpret_cast<uint64_t*>(imm) + 1) =
+          make_uint64(i.InputUint32(3), i.InputUint32(2));
+      __ WasmRvvS128const(dst, imm);
+      break;
+    }
+    case kRiscvI64x2Mul: {
+      (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
+      __ vmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                 i.InputSimd128Register(1));
+      break;
+    }
+    case kRiscvI64x2Add: {
+      (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
+      __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                 i.InputSimd128Register(1));
+      break;
+    }
+    case kRiscvVrgather: {
+      Simd128Register index = i.InputSimd128Register(0);
+      if (!(instr->InputAt(1)->IsImmediate())) {
+        index = i.InputSimd128Register(1);
+      } else {
+        __ VU.set(kScratchReg, E64, m1);
+        __ li(kScratchReg, i.InputInt64(1));
+        __ vmv_sx(kSimd128ScratchReg3, kScratchReg);
+        index = kSimd128ScratchReg3;
+      }
+      __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+      if (i.OutputSimd128Register() == i.InputSimd128Register(0)) {
+        __ vrgather_vv(kSimd128ScratchReg, i.InputSimd128Register(0), index);
+        __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+      } else {
+        __ vrgather_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                       index);
+      }
+      break;
+    }
+    case kRiscvVslidedown: {
+      __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+      if (instr->InputAt(1)->IsImmediate()) {
+        DCHECK(is_uint5(i.InputInt32(1)));
+        __ vslidedown_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                         i.InputInt5(1));
+      } else {
+        __ vslidedown_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                         i.InputRegister(1));
+      }
+      break;
+    }
+    case kRiscvI8x16RoundingAverageU: {
+      __ VU.set(kScratchReg2, E8, m1);
+      __ vwaddu_vv(kSimd128ScratchReg, i.InputSimd128Register(0),
+                   i.InputSimd128Register(1));
+      __ li(kScratchReg, 1);
+      __ vwaddu_wx(kSimd128ScratchReg3, kSimd128ScratchReg, kScratchReg);
+      __ li(kScratchReg, 2);
+      __ 
VU.set(kScratchReg2, E16, m2); + __ vdivu_vx(kSimd128ScratchReg3, kSimd128ScratchReg3, kScratchReg); + __ VU.set(kScratchReg2, E8, m1); + __ vnclipu_vi(i.OutputSimd128Register(), kSimd128ScratchReg3, 0); + break; + } + case kRiscvI16x8RoundingAverageU: { + __ VU.set(kScratchReg2, E16, m1); + __ vwaddu_vv(kSimd128ScratchReg, i.InputSimd128Register(0), + i.InputSimd128Register(1)); + __ li(kScratchReg, 1); + __ vwaddu_wx(kSimd128ScratchReg3, kSimd128ScratchReg, kScratchReg); + __ li(kScratchReg, 2); + __ VU.set(kScratchReg2, E32, m2); + __ vdivu_vx(kSimd128ScratchReg3, kSimd128ScratchReg3, kScratchReg); + __ VU.set(kScratchReg2, E16, m1); + __ vnclipu_vi(i.OutputSimd128Register(), kSimd128ScratchReg3, 0); + break; + } + case kRiscvI16x8Mul: { + (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1); + __ vmv_vx(kSimd128ScratchReg, zero_reg); + __ vmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI16x8Q15MulRSatS: { + __ VU.set(kScratchReg, E16, m1); + __ vsmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI16x8AddSatS: { + (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1); + __ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI16x8AddSatU: { + (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1); + __ vsaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI8x16AddSatS: { + (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1); + __ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI8x16AddSatU: { + (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1); + __ vsaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI64x2Sub: { + (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1); + __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI16x8SubSatS: { + (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1); + __ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI16x8SubSatU: { + (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1); + __ vssubu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI8x16SubSatS: { + (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1); + __ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI8x16SubSatU: { + (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1); + __ vssubu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI8x16ExtractLaneU: { + __ VU.set(kScratchReg, E8, m1); + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), + i.InputInt8(1)); + __ vmv_xs(i.OutputRegister(), kSimd128ScratchReg); + __ slli(i.OutputRegister(), i.OutputRegister(), 64 - 8); + __ srli(i.OutputRegister(), i.OutputRegister(), 64 - 8); + break; + } + case kRiscvI8x16ExtractLaneS: { + __ VU.set(kScratchReg, E8, m1); + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), + i.InputInt8(1)); + __ vmv_xs(i.OutputRegister(), kSimd128ScratchReg); + break; + } + case kRiscvI16x8ExtractLaneU: { + __ VU.set(kScratchReg, E16, m1); + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), + i.InputInt8(1)); + __ 
vmv_xs(i.OutputRegister(), kSimd128ScratchReg); + __ slli(i.OutputRegister(), i.OutputRegister(), 64 - 16); + __ srli(i.OutputRegister(), i.OutputRegister(), 64 - 16); + break; + } + case kRiscvI16x8ExtractLaneS: { + __ VU.set(kScratchReg, E16, m1); + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), + i.InputInt8(1)); + __ vmv_xs(i.OutputRegister(), kSimd128ScratchReg); + break; + } + case kRiscvI8x16ShrU: { + __ VU.set(kScratchReg, E8, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 8 - 1); + __ vsrl_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + } else { + __ vsrl_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1) % 8); + } + break; + } + case kRiscvI16x8ShrU: { + __ VU.set(kScratchReg, E16, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 16 - 1); + __ vsrl_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + } else { + __ vsrl_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1) % 16); + } + break; + } + case kRiscvI32x4Mul: { + __ VU.set(kScratchReg, E32, m1); + __ vmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvI32x4TruncSatF64x2SZero: { + __ VU.set(kScratchReg, E64, m1); + __ vmv_vx(kSimd128ScratchReg, zero_reg); + __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0)); + __ vmv_vv(kSimd128ScratchReg3, i.InputSimd128Register(0)); + __ VU.set(kScratchReg, E32, m1); + __ VU.set(RoundingMode::RTZ); + __ vfncvt_x_f_w(kSimd128ScratchReg, kSimd128ScratchReg3, MaskType::Mask); + __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI32x4TruncSatF64x2UZero: { + __ VU.set(kScratchReg, E64, m1); + __ vmv_vx(kSimd128ScratchReg, zero_reg); + __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0)); + __ vmv_vv(kSimd128ScratchReg3, i.InputSimd128Register(0)); + __ VU.set(kScratchReg, E32, m1); + __ VU.set(RoundingMode::RTZ); + __ vfncvt_xu_f_w(kSimd128ScratchReg, kSimd128ScratchReg3, MaskType::Mask); + __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI32x4ShrU: { + __ VU.set(kScratchReg, E32, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 32 - 1); + __ vsrl_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + } else { + __ vsrl_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1) % 32); + } + break; + } + case kRiscvI64x2ShrU: { + __ VU.set(kScratchReg, E64, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 64 - 1); + __ vsrl_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + } else { + if (is_uint5(i.InputInt6(1) % 64)) { + __ vsrl_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt6(1) % 64); + } else { + __ li(kScratchReg, i.InputInt6(1) % 64); + __ vsrl_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + kScratchReg); + } + } + break; + } + case kRiscvI8x16ShrS: { + __ VU.set(kScratchReg, E8, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 8 - 1); + __ vsra_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + } else { + __ vsra_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1) % 8); + } + break; + } + case 
kRiscvI16x8ShrS: { + __ VU.set(kScratchReg, E16, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 16 - 1); + __ vsra_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + } else { + __ vsra_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1) % 16); + } + break; + } + case kRiscvI32x4ShrS: { + __ VU.set(kScratchReg, E32, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 32 - 1); + __ vsra_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + } else { + __ vsra_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1) % 32); + } + break; + } + case kRiscvI64x2ShrS: { + __ VU.set(kScratchReg, E64, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 64 - 1); + __ vsra_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + } else { + if (is_uint5(i.InputInt6(1) % 64)) { + __ vsra_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt6(1) % 64); + } else { + __ li(kScratchReg, i.InputInt6(1) % 64); + __ vsra_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + kScratchReg); + } + } + break; + } + case kRiscvI32x4ExtractLane: { + __ WasmRvvExtractLane(i.OutputRegister(), i.InputSimd128Register(0), + i.InputInt8(1), E32, m1); + break; + } + case kRiscvI32x4Abs: { + __ VU.set(kScratchReg, E32, m1); + __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ vmv_vx(kSimd128RegZero, zero_reg); + __ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero); + __ vneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + MaskType::Mask); + break; + } + case kRiscvI16x8Abs: { + __ VU.set(kScratchReg, E16, m1); + __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ vmv_vx(kSimd128RegZero, zero_reg); + __ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero); + __ vneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + MaskType::Mask); + break; + } + case kRiscvI8x16Abs: { + __ VU.set(kScratchReg, E8, m1); + __ vmv_vx(kSimd128RegZero, zero_reg); + __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero); + __ vneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + MaskType::Mask); + break; + } + case kRiscvI64x2Abs: { + __ VU.set(kScratchReg, E64, m1); + __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ vmv_vx(kSimd128RegZero, zero_reg); + __ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero); + __ vneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + MaskType::Mask); + break; + } + case kRiscvI64x2ExtractLane: { + __ WasmRvvExtractLane(i.OutputRegister(), i.InputSimd128Register(0), + i.InputInt8(1), E64, m1); + break; + } + case kRiscvI8x16Eq: { + __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E8, m1); + break; + } + case kRiscvI16x8Eq: { + __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E16, m1); + break; + } + case kRiscvI32x4Eq: { + __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E32, m1); + break; + } + case kRiscvI64x2Eq: { + __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E64, m1); + break; + } + case kRiscvI8x16Ne: { + __ WasmRvvNe(i.OutputSimd128Register(), 
i.InputSimd128Register(0), + i.InputSimd128Register(1), E8, m1); + break; + } + case kRiscvI16x8Ne: { + __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E16, m1); + break; + } + case kRiscvI32x4Ne: { + __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E32, m1); + break; + } + case kRiscvI64x2Ne: { + __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E64, m1); + break; + } + case kRiscvI8x16GeS: { + __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E8, m1); + break; + } + case kRiscvI16x8GeS: { + __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E16, m1); + break; + } + case kRiscvI32x4GeS: { + __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E32, m1); + break; + } + case kRiscvI64x2GeS: { + __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E64, m1); + break; + } + case kRiscvI8x16GeU: { + __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E8, m1); + break; + } + case kRiscvI16x8GeU: { + __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E16, m1); + break; + } + case kRiscvI32x4GeU: { + __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E32, m1); + break; + } + case kRiscvI8x16GtS: { + __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E8, m1); + break; + } + case kRiscvI16x8GtS: { + __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E16, m1); + break; + } + case kRiscvI32x4GtS: { + __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E32, m1); + break; + } + case kRiscvI64x2GtS: { + __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E64, m1); + break; + } + case kRiscvI8x16GtU: { + __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E8, m1); + break; + } + case kRiscvI16x8GtU: { + __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E16, m1); + break; + } + case kRiscvI32x4GtU: { + __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1), E32, m1); + break; + } + case kRiscvI8x16Shl: { + __ VU.set(kScratchReg, E8, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 8 - 1); + __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + } else { + __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1) % 8); + } + break; + } + case kRiscvI16x8Shl: { + __ VU.set(kScratchReg, E16, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 16 - 1); + __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + } else { + __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1) % 16); + } + break; + } + case kRiscvI32x4Shl: { + __ VU.set(kScratchReg, E32, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 32 - 1); + __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + 
} else { + __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt5(1) % 32); + } + break; + } + case kRiscvI64x2Shl: { + __ VU.set(kScratchReg, E64, m1); + if (instr->InputAt(1)->IsRegister()) { + __ andi(i.InputRegister(1), i.InputRegister(1), 64 - 1); + __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputRegister(1)); + } else { + if (is_int5(i.InputInt6(1) % 64)) { + __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputInt6(1) % 64); + } else { + __ li(kScratchReg, i.InputInt6(1) % 64); + __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0), + kScratchReg); + } + } + break; + } + case kRiscvI8x16ReplaceLane: { + Simd128Register src = i.InputSimd128Register(0); + Simd128Register dst = i.OutputSimd128Register(); + __ VU.set(kScratchReg, E64, m1); + __ li(kScratchReg, 0x1 << i.InputInt8(1)); + __ vmv_sx(v0, kScratchReg); + __ VU.set(kScratchReg, E8, m1); + __ vmerge_vx(dst, i.InputRegister(2), src); + break; + } + case kRiscvI16x8ReplaceLane: { + Simd128Register src = i.InputSimd128Register(0); + Simd128Register dst = i.OutputSimd128Register(); + __ VU.set(kScratchReg, E16, m1); + __ li(kScratchReg, 0x1 << i.InputInt8(1)); + __ vmv_sx(v0, kScratchReg); + __ vmerge_vx(dst, i.InputRegister(2), src); + break; + } + case kRiscvI64x2ReplaceLane: { + Simd128Register src = i.InputSimd128Register(0); + Simd128Register dst = i.OutputSimd128Register(); + __ VU.set(kScratchReg, E64, m1); + __ li(kScratchReg, 0x1 << i.InputInt8(1)); + __ vmv_sx(v0, kScratchReg); + __ vmerge_vx(dst, i.InputRegister(2), src); + break; + } + case kRiscvI32x4ReplaceLane: { + Simd128Register src = i.InputSimd128Register(0); + Simd128Register dst = i.OutputSimd128Register(); + __ VU.set(kScratchReg, E32, m1); + __ li(kScratchReg, 0x1 << i.InputInt8(1)); + __ vmv_sx(v0, kScratchReg); + __ vmerge_vx(dst, i.InputRegister(2), src); + break; + } + case kRiscvI8x16BitMask: { + Register dst = i.OutputRegister(); + Simd128Register src = i.InputSimd128Register(0); + __ VU.set(kScratchReg, E8, m1); + __ vmv_vx(kSimd128RegZero, zero_reg); + __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero); + __ VU.set(kScratchReg, E32, m1); + __ vmv_xs(dst, kSimd128ScratchReg); + break; + } + case kRiscvI16x8BitMask: { + Register dst = i.OutputRegister(); + Simd128Register src = i.InputSimd128Register(0); + __ VU.set(kScratchReg, E16, m1); + __ vmv_vx(kSimd128RegZero, zero_reg); + __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero); + __ VU.set(kScratchReg, E32, m1); + __ vmv_xs(dst, kSimd128ScratchReg); + break; + } + case kRiscvI32x4BitMask: { + Register dst = i.OutputRegister(); + Simd128Register src = i.InputSimd128Register(0); + __ VU.set(kScratchReg, E32, m1); + __ vmv_vx(kSimd128RegZero, zero_reg); + __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero); + __ vmv_xs(dst, kSimd128ScratchReg); + break; + } + case kRiscvI64x2BitMask: { + Register dst = i.OutputRegister(); + Simd128Register src = i.InputSimd128Register(0); + __ VU.set(kScratchReg, E64, m1); + __ vmv_vx(kSimd128RegZero, zero_reg); + __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero); + __ VU.set(kScratchReg, E32, m1); + __ vmv_xs(dst, kSimd128ScratchReg); + break; + } + case kRiscvV128AnyTrue: { + __ VU.set(kScratchReg, E8, m1); + Register dst = i.OutputRegister(); + Label t; + __ vmv_sx(kSimd128ScratchReg, zero_reg); + __ vredmaxu_vs(kSimd128ScratchReg, i.InputSimd128Register(0), + kSimd128ScratchReg); + __ vmv_xs(dst, kSimd128ScratchReg); + __ beq(dst, zero_reg, &t); + __ li(dst, 1); + __ 
bind(&t); + break; + } + case kRiscvI64x2AllTrue: { + __ VU.set(kScratchReg, E64, m1); + Register dst = i.OutputRegister(); + Label all_true; + __ li(kScratchReg, -1); + __ vmv_sx(kSimd128ScratchReg, kScratchReg); + __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0), + kSimd128ScratchReg); + __ vmv_xs(dst, kSimd128ScratchReg); + __ beqz(dst, &all_true); + __ li(dst, 1); + __ bind(&all_true); + break; + } + case kRiscvI32x4AllTrue: { + __ VU.set(kScratchReg, E32, m1); + Register dst = i.OutputRegister(); + Label all_true; + __ li(kScratchReg, -1); + __ vmv_sx(kSimd128ScratchReg, kScratchReg); + __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0), + kSimd128ScratchReg); + __ vmv_xs(dst, kSimd128ScratchReg); + __ beqz(dst, &all_true); + __ li(dst, 1); + __ bind(&all_true); + break; + } + case kRiscvI16x8AllTrue: { + __ VU.set(kScratchReg, E16, m1); + Register dst = i.OutputRegister(); + Label all_true; + __ li(kScratchReg, -1); + __ vmv_sx(kSimd128ScratchReg, kScratchReg); + __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0), + kSimd128ScratchReg); + __ vmv_xs(dst, kSimd128ScratchReg); + __ beqz(dst, &all_true); + __ li(dst, 1); + __ bind(&all_true); + break; + } + case kRiscvI8x16AllTrue: { + __ VU.set(kScratchReg, E8, m1); + Register dst = i.OutputRegister(); + Label all_true; + __ li(kScratchReg, -1); + __ vmv_sx(kSimd128ScratchReg, kScratchReg); + __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0), + kSimd128ScratchReg); + __ vmv_xs(dst, kSimd128ScratchReg); + __ beqz(dst, &all_true); + __ li(dst, 1); + __ bind(&all_true); + break; + } + case kRiscvI8x16Shuffle: { + VRegister dst = i.OutputSimd128Register(), + src0 = i.InputSimd128Register(0), + src1 = i.InputSimd128Register(1); + + int64_t imm1 = make_uint64(i.InputInt32(3), i.InputInt32(2)); + int64_t imm2 = make_uint64(i.InputInt32(5), i.InputInt32(4)); + __ VU.set(kScratchReg, VSew::E64, Vlmul::m1); + __ li(kScratchReg, imm2); + __ vmv_sx(kSimd128ScratchReg2, kScratchReg); + __ vslideup_vi(kSimd128ScratchReg, kSimd128ScratchReg2, 1); + __ li(kScratchReg, imm1); + __ vmv_sx(kSimd128ScratchReg, kScratchReg); + + __ VU.set(kScratchReg, E8, m1); + if (dst == src0) { + __ vmv_vv(kSimd128ScratchReg2, src0); + src0 = kSimd128ScratchReg2; + } else if (dst == src1) { + __ vmv_vv(kSimd128ScratchReg2, src1); + src1 = kSimd128ScratchReg2; + } + __ vrgather_vv(dst, src0, kSimd128ScratchReg); + __ vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg, -16); + __ vrgather_vv(kSimd128ScratchReg3, src1, kSimd128ScratchReg); + __ vor_vv(dst, dst, kSimd128ScratchReg3); + break; + } + case kRiscvI8x16Popcnt: { + VRegister dst = i.OutputSimd128Register(), + src = i.InputSimd128Register(0); + Label t; + + __ VU.set(kScratchReg, E8, m1); + __ vmv_vv(kSimd128ScratchReg, src); + __ vmv_vv(dst, kSimd128RegZero); + + __ bind(&t); + __ vmsne_vv(v0, kSimd128ScratchReg, kSimd128RegZero); + __ vadd_vi(dst, dst, 1, Mask); + __ vadd_vi(kSimd128ScratchReg2, kSimd128ScratchReg, -1, Mask); + __ vand_vv(kSimd128ScratchReg, kSimd128ScratchReg, kSimd128ScratchReg2); + // kScratchReg = -1 if kSimd128ScratchReg == 0 i.e. 
no active element + __ vfirst_m(kScratchReg, kSimd128ScratchReg); + __ bgez(kScratchReg, &t); + break; + } + case kRiscvF64x2NearestInt: { + __ Round_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + kScratchReg, kSimd128ScratchReg); + break; + } + case kRiscvF64x2Trunc: { + __ Trunc_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + kScratchReg, kSimd128ScratchReg); + break; + } + case kRiscvF64x2Sqrt: { + __ VU.set(kScratchReg, E64, m1); + __ vfsqrt_v(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kRiscvF64x2Splat: { + (__ VU).set(kScratchReg, E64, m1); + __ fmv_x_d(kScratchReg, i.InputDoubleRegister(0)); + __ vmv_vx(i.OutputSimd128Register(), kScratchReg); + break; + } + case kRiscvF64x2Abs: { + __ VU.set(kScratchReg, VSew::E64, Vlmul::m1); + __ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kRiscvF64x2Neg: { + __ VU.set(kScratchReg, VSew::E64, Vlmul::m1); + __ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kRiscvF64x2Add: { + __ VU.set(kScratchReg, E64, m1); + __ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvF64x2Sub: { + __ VU.set(kScratchReg, E64, m1); + __ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvF64x2Ceil: { + __ Ceil_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + kScratchReg, kSimd128ScratchReg); + break; + } + case kRiscvF64x2Floor: { + __ Floor_d(i.OutputSimd128Register(), i.InputSimd128Register(0), + kScratchReg, kSimd128ScratchReg); + break; + } + case kRiscvF64x2Ne: { + __ VU.set(kScratchReg, E64, m1); + __ vmfne_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0)); + __ vmv_vx(i.OutputSimd128Register(), zero_reg); + __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register()); + break; + } + case kRiscvF64x2Eq: { + __ VU.set(kScratchReg, E64, m1); + __ vmfeq_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0)); + __ vmv_vx(i.OutputSimd128Register(), zero_reg); + __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register()); + break; + } + case kRiscvF64x2ReplaceLane: { + __ VU.set(kScratchReg, E64, m1); + __ li(kScratchReg, 0x1 << i.InputInt8(1)); + __ vmv_sx(v0, kScratchReg); + __ fmv_x_d(kScratchReg, i.InputSingleRegister(2)); + __ vmerge_vx(i.OutputSimd128Register(), kScratchReg, + i.InputSimd128Register(0)); + break; + } + case kRiscvF64x2Lt: { + __ VU.set(kScratchReg, E64, m1); + __ vmflt_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1)); + __ vmv_vx(i.OutputSimd128Register(), zero_reg); + __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register()); + break; + } + case kRiscvF64x2Le: { + __ VU.set(kScratchReg, E64, m1); + __ vmfle_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1)); + __ vmv_vx(i.OutputSimd128Register(), zero_reg); + __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register()); + break; + } + case kRiscvF64x2Pmax: { + __ VU.set(kScratchReg, E64, m1); + __ vmflt_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1)); + __ vmerge_vv(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kRiscvF64x2Pmin: { + __ VU.set(kScratchReg, E64, m1); + __ vmflt_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0)); + __ vmerge_vv(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kRiscvF64x2Min: { + __ 
VU.set(kScratchReg, E64, m1); + const int64_t kNaN = 0x7ff8000000000000L; + __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0)); + __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1), + i.InputSimd128Register(1)); + __ vand_vv(v0, v0, kSimd128ScratchReg); + __ li(kScratchReg, kNaN); + __ vmv_vx(kSimd128ScratchReg, kScratchReg); + __ vfmin_vv(kSimd128ScratchReg, i.InputSimd128Register(1), + i.InputSimd128Register(0), Mask); + __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvF64x2Max: { + __ VU.set(kScratchReg, E64, m1); + const int64_t kNaN = 0x7ff8000000000000L; + __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0)); + __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1), + i.InputSimd128Register(1)); + __ vand_vv(v0, v0, kSimd128ScratchReg); + __ li(kScratchReg, kNaN); + __ vmv_vx(kSimd128ScratchReg, kScratchReg); + __ vfmax_vv(kSimd128ScratchReg, i.InputSimd128Register(1), + i.InputSimd128Register(0), Mask); + __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvF64x2Div: { + __ VU.set(kScratchReg, E64, m1); + __ vfdiv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvF64x2Mul: { + __ VU.set(kScratchReg, E64, m1); + __ VU.set(RoundingMode::RTZ); + __ vfmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvF64x2ExtractLane: { + __ VU.set(kScratchReg, E64, m1); + if (is_uint5(i.InputInt8(1))) { + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), + i.InputInt8(1)); + } else { + __ li(kScratchReg, i.InputInt8(1)); + __ vslidedown_vx(kSimd128ScratchReg, i.InputSimd128Register(0), + kScratchReg); + } + __ vfmv_fs(i.OutputDoubleRegister(), kSimd128ScratchReg); + break; + } + case kRiscvF64x2PromoteLowF32x4: { + __ VU.set(kScratchReg, E32, mf2); + if (i.OutputSimd128Register() != i.InputSimd128Register(0)) { + __ vfwcvt_f_f_v(i.OutputSimd128Register(), i.InputSimd128Register(0)); + } else { + __ vfwcvt_f_f_v(kSimd128ScratchReg3, i.InputSimd128Register(0)); + __ VU.set(kScratchReg, E64, m1); + __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg3); + } + break; + } + case kRiscvF64x2ConvertLowI32x4S: { + __ VU.set(kScratchReg, E32, mf2); + if (i.OutputSimd128Register() != i.InputSimd128Register(0)) { + __ vfwcvt_f_x_v(i.OutputSimd128Register(), i.InputSimd128Register(0)); + } else { + __ vfwcvt_f_x_v(kSimd128ScratchReg3, i.InputSimd128Register(0)); + __ VU.set(kScratchReg, E64, m1); + __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg3); + } + break; + } + case kRiscvF64x2ConvertLowI32x4U: { + __ VU.set(kScratchReg, E32, mf2); + if (i.OutputSimd128Register() != i.InputSimd128Register(0)) { + __ vfwcvt_f_xu_v(i.OutputSimd128Register(), i.InputSimd128Register(0)); + } else { + __ vfwcvt_f_xu_v(kSimd128ScratchReg3, i.InputSimd128Register(0)); + __ VU.set(kScratchReg, E64, m1); + __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg3); + } + break; + } + case kRiscvF64x2Qfma: { + __ VU.set(kScratchReg, E64, m1); + __ vfmadd_vv(i.InputSimd128Register(1), i.InputSimd128Register(2), + i.InputSimd128Register(0)); + __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kRiscvF64x2Qfms: { + __ VU.set(kScratchReg, E64, m1); + __ vfnmsub_vv(i.InputSimd128Register(1), i.InputSimd128Register(2), + i.InputSimd128Register(0)); + __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case 
kRiscvF32x4ExtractLane: { + __ VU.set(kScratchReg, E32, m1); + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), + i.InputInt8(1)); + __ vfmv_fs(i.OutputDoubleRegister(), kSimd128ScratchReg); + break; + } + case kRiscvF32x4Trunc: { + __ Trunc_f(i.OutputSimd128Register(), i.InputSimd128Register(0), + kScratchReg, kSimd128ScratchReg); + break; + } + case kRiscvF32x4NearestInt: { + __ Round_f(i.OutputSimd128Register(), i.InputSimd128Register(0), + kScratchReg, kSimd128ScratchReg); + break; + } + case kRiscvF32x4DemoteF64x2Zero: { + __ VU.set(kScratchReg, E32, mf2); + __ vfncvt_f_f_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ VU.set(kScratchReg, E32, m1); + __ vmv_vi(v0, 12); + __ vmerge_vx(i.OutputSimd128Register(), zero_reg, + i.OutputSimd128Register()); + break; + } + case kRiscvF32x4Neg: { + __ VU.set(kScratchReg, VSew::E32, Vlmul::m1); + __ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kRiscvF32x4Abs: { + __ VU.set(kScratchReg, VSew::E32, Vlmul::m1); + __ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kRiscvF32x4Splat: { + (__ VU).set(kScratchReg, E32, m1); + __ fmv_x_w(kScratchReg, i.InputSingleRegister(0)); + __ vmv_vx(i.OutputSimd128Register(), kScratchReg); + break; + } + case kRiscvF32x4Add: { + __ VU.set(kScratchReg, E32, m1); + __ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvF32x4Sub: { + __ VU.set(kScratchReg, E32, m1); + __ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvF32x4Ceil: { + __ Ceil_f(i.OutputSimd128Register(), i.InputSimd128Register(0), + kScratchReg, kSimd128ScratchReg); + break; + } + case kRiscvF32x4Floor: { + __ Floor_f(i.OutputSimd128Register(), i.InputSimd128Register(0), + kScratchReg, kSimd128ScratchReg); + break; + } + case kRiscvF32x4UConvertI32x4: { + __ VU.set(kScratchReg, E32, m1); + __ VU.set(RoundingMode::RTZ); + __ vfcvt_f_xu_v(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kRiscvF32x4SConvertI32x4: { + __ VU.set(kScratchReg, E32, m1); + __ VU.set(RoundingMode::RTZ); + __ vfcvt_f_x_v(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kRiscvF32x4Div: { + __ VU.set(kScratchReg, E32, m1); + __ VU.set(RoundingMode::RTZ); + __ vfdiv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvF32x4Mul: { + __ VU.set(kScratchReg, E32, m1); + __ VU.set(RoundingMode::RTZ); + __ vfmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kRiscvF32x4Eq: { + __ VU.set(kScratchReg, E32, m1); + __ vmfeq_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0)); + __ vmv_vx(i.OutputSimd128Register(), zero_reg); + __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register()); + break; + } + case kRiscvF32x4Ne: { + __ VU.set(kScratchReg, E32, m1); + __ vmfne_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0)); + __ vmv_vx(i.OutputSimd128Register(), zero_reg); + __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register()); + break; + } + case kRiscvF32x4ReplaceLane: { + __ VU.set(kScratchReg, E32, m1); + __ li(kScratchReg, 0x1 << i.InputInt8(1)); + __ vmv_sx(v0, kScratchReg); + __ fmv_x_w(kScratchReg, i.InputSingleRegister(2)); + __ vmerge_vx(i.OutputSimd128Register(), kScratchReg, + i.InputSimd128Register(0)); + break; + } + case 
kRiscvF32x4Lt: { + __ VU.set(kScratchReg, E32, m1); + __ vmflt_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1)); + __ vmv_vx(i.OutputSimd128Register(), zero_reg); + __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register()); + break; + } + case kRiscvF32x4Le: { + __ VU.set(kScratchReg, E32, m1); + __ vmfle_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1)); + __ vmv_vx(i.OutputSimd128Register(), zero_reg); + __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register()); + break; + } + case kRiscvF32x4Pmax: { + __ VU.set(kScratchReg, E32, m1); + __ vmflt_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1)); + __ vmerge_vv(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kRiscvF32x4Pmin: { + __ VU.set(kScratchReg, E32, m1); + __ vmflt_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0)); + __ vmerge_vv(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(0)); + break; + } + case kRiscvF32x4Sqrt: { + __ VU.set(kScratchReg, E32, m1); + __ vfsqrt_v(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kRiscvF32x4Max: { + __ VU.set(kScratchReg, E32, m1); + const int32_t kNaN = 0x7FC00000; + __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0)); + __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1), + i.InputSimd128Register(1)); + __ vand_vv(v0, v0, kSimd128ScratchReg); + __ li(kScratchReg, kNaN); + __ vmv_vx(kSimd128ScratchReg, kScratchReg); + __ vfmax_vv(kSimd128ScratchReg, i.InputSimd128Register(1), + i.InputSimd128Register(0), Mask); + __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvF32x4Min: { + __ VU.set(kScratchReg, E32, m1); + const int32_t kNaN = 0x7FC00000; + __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0)); + __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1), + i.InputSimd128Register(1)); + __ vand_vv(v0, v0, kSimd128ScratchReg); + __ li(kScratchReg, kNaN); + __ vmv_vx(kSimd128ScratchReg, kScratchReg); + __ vfmin_vv(kSimd128ScratchReg, i.InputSimd128Register(1), + i.InputSimd128Register(0), Mask); + __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvF32x4RecipApprox: { + __ VU.set(kScratchReg, E32, m1); + __ vfrec7_v(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kRiscvF32x4RecipSqrtApprox: { + __ VU.set(kScratchReg, E32, m1); + __ vfrsqrt7_v(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kRiscvF32x4Qfma: { + __ VU.set(kScratchReg, E32, m1); + __ vfmadd_vv(i.InputSimd128Register(1), i.InputSimd128Register(2), + i.InputSimd128Register(0)); + __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kRiscvF32x4Qfms: { + __ VU.set(kScratchReg, E32, m1); + __ vfnmsub_vv(i.InputSimd128Register(1), i.InputSimd128Register(2), + i.InputSimd128Register(0)); + __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kRiscvI64x2SConvertI32x4Low: { + __ VU.set(kScratchReg, E64, m1); + __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0)); + __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + + break; + } + case kRiscvI64x2SConvertI32x4High: { + __ VU.set(kScratchReg, E32, m1); + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 2); + __ VU.set(kScratchReg, E64, m1); + __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI64x2UConvertI32x4Low: { + __ 
VU.set(kScratchReg, E64, m1); + __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0)); + __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI64x2UConvertI32x4High: { + __ VU.set(kScratchReg, E32, m1); + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 2); + __ VU.set(kScratchReg, E64, m1); + __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI32x4SConvertI16x8Low: { + __ VU.set(kScratchReg, E32, m1); + __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0)); + __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI32x4SConvertI16x8High: { + __ VU.set(kScratchReg, E16, m1); + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 4); + __ VU.set(kScratchReg, E32, m1); + __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI32x4SConvertF32x4: { + __ VU.set(kScratchReg, E32, m1); + __ VU.set(RoundingMode::RTZ); + __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0)); + if (i.OutputSimd128Register() != i.InputSimd128Register(0)) { + __ vmv_vx(i.OutputSimd128Register(), zero_reg); + __ vfcvt_x_f_v(i.OutputSimd128Register(), i.InputSimd128Register(0), + Mask); + } else { + __ vmv_vx(kSimd128ScratchReg, zero_reg); + __ vfcvt_x_f_v(kSimd128ScratchReg, i.InputSimd128Register(0), Mask); + __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg); + } + break; + } + case kRiscvI32x4UConvertF32x4: { + __ VU.set(kScratchReg, E32, m1); + __ VU.set(RoundingMode::RTZ); + __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0)); + if (i.OutputSimd128Register() != i.InputSimd128Register(0)) { + __ vmv_vx(i.OutputSimd128Register(), zero_reg); + __ vfcvt_xu_f_v(i.OutputSimd128Register(), i.InputSimd128Register(0), + Mask); + } else { + __ vmv_vx(kSimd128ScratchReg, zero_reg); + __ vfcvt_xu_f_v(kSimd128ScratchReg, i.InputSimd128Register(0), Mask); + __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg); + } + break; + } + case kRiscvI32x4UConvertI16x8Low: { + __ VU.set(kScratchReg, E32, m1); + __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0)); + __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI32x4UConvertI16x8High: { + __ VU.set(kScratchReg, E16, m1); + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 4); + __ VU.set(kScratchReg, E32, m1); + __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI16x8SConvertI8x16Low: { + __ VU.set(kScratchReg, E16, m1); + __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0)); + __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI16x8SConvertI8x16High: { + __ VU.set(kScratchReg, E8, m1); + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 8); + __ VU.set(kScratchReg, E16, m1); + __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI16x8UConvertI8x16Low: { + __ VU.set(kScratchReg, E16, m1); + __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0)); + __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI16x8UConvertI8x16High: { + __ VU.set(kScratchReg, E8, m1); + __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 8); + __ VU.set(kScratchReg, E16, m1); + __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg); + break; + } + case kRiscvI8x16SConvertI16x8: { + __ VU.set(kScratchReg, E16, m1); + __ vmv_vv(v26, i.InputSimd128Register(0)); + __ vmv_vv(v27, 
i.InputSimd128Register(1)); + __ VU.set(kScratchReg, E8, m1); + __ VU.set(RoundingMode::RNE); + __ vnclip_vi(i.OutputSimd128Register(), v26, 0); + break; + } + case kRiscvI8x16UConvertI16x8: { + __ VU.set(kScratchReg, E16, m1); + __ vmv_vv(v26, i.InputSimd128Register(0)); + __ vmv_vv(v27, i.InputSimd128Register(1)); + __ VU.set(kScratchReg, E16, m2); + __ vmax_vx(v26, v26, zero_reg); + __ VU.set(kScratchReg, E8, m1); + __ VU.set(RoundingMode::RNE); + __ vnclipu_vi(i.OutputSimd128Register(), v26, 0); + break; + } + case kRiscvI16x8SConvertI32x4: { + __ VU.set(kScratchReg, E32, m1); + __ vmv_vv(v26, i.InputSimd128Register(0)); + __ vmv_vv(v27, i.InputSimd128Register(1)); + __ VU.set(kScratchReg, E16, m1); + __ VU.set(RoundingMode::RNE); + __ vnclip_vi(i.OutputSimd128Register(), v26, 0); + break; + } + case kRiscvI16x8UConvertI32x4: { + __ VU.set(kScratchReg, E32, m1); + __ vmv_vv(v26, i.InputSimd128Register(0)); + __ vmv_vv(v27, i.InputSimd128Register(1)); + __ VU.set(kScratchReg, E32, m2); + __ vmax_vx(v26, v26, zero_reg); + __ VU.set(kScratchReg, E16, m1); + __ VU.set(RoundingMode::RNE); + __ vnclipu_vi(i.OutputSimd128Register(), v26, 0); + break; + } + ASSEMBLE_RVV_UNOP_INTEGER_VV(Neg, vneg_vv) + ASSEMBLE_RVV_BINOP_INTEGER(MaxU, vmaxu_vv) + ASSEMBLE_RVV_BINOP_INTEGER(MaxS, vmax_vv) + ASSEMBLE_RVV_BINOP_INTEGER(MinU, vminu_vv) + ASSEMBLE_RVV_BINOP_INTEGER(MinS, vmin_vv) + ASSEMBLE_RVV_UNOP_INTEGER_VR(Splat, vmv_vx) + ASSEMBLE_RVV_BINOP_INTEGER(Add, vadd_vv) + ASSEMBLE_RVV_BINOP_INTEGER(Sub, vsub_vv) + case kRiscvVwadd: { + __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3)); + __ vwadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvVwaddu: { + __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3)); + __ vwaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvVwmul: { + __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3)); + __ vwmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvVwmulu: { + __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3)); + __ vwmulu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + case kRiscvVmvSx: { + __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3)); + if (instr->InputAt(0)->IsRegister()) { + __ vmv_sx(i.OutputSimd128Register(), i.InputRegister(0)); + } else { + DCHECK(instr->InputAt(0)->IsImmediate()); + __ li(kScratchReg, i.InputInt64(0)); + __ vmv_sx(i.OutputSimd128Register(), kScratchReg); + } + break; + } + case kRiscvVcompress: { + __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3)); + if (instr->InputAt(1)->IsSimd128Register()) { + __ vcompress_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + } else { + DCHECK(instr->InputAt(1)->IsImmediate()); + __ li(kScratchReg, i.InputInt64(1)); + __ vmv_sx(v0, kScratchReg); + __ vcompress_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + v0); + } + break; + } + case kRiscvVaddVv: { + __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3)); + __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputSimd128Register(1)); + break; + } + default: +#ifdef DEBUG + switch (arch_opcode) { +#define Print(name) \ + case k##name: \ + printf("k%s", #name); \ + break; + TARGET_ARCH_OPCODE_LIST(Print); +#undef Print + default: + break; + } +#endif + UNIMPLEMENTED(); + } + return kSuccess; +} + +#define 
UNSUPPORTED_COND(opcode, condition) \ + StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \ + << "\""; \ + UNIMPLEMENTED(); + +bool IsInludeEqual(Condition cc) { + switch (cc) { + case equal: + case greater_equal: + case less_equal: + case Uless_equal: + case Ugreater_equal: + return true; + default: + return false; + } +} + +void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, + Instruction* instr, FlagsCondition condition, + Label* tlabel, Label* flabel, bool fallthru) { +#undef __ +#define __ tasm-> + RiscvOperandConverter i(gen, instr); + + Condition cc = kNoCondition; + // RISC-V does not have condition code flags, so compare and branch are + // implemented differently than on the other arch's. The compare operations + // emit riscv64 pseudo-instructions, which are handled here by branch + // instructions that do the actual comparison. Essential that the input + // registers to compare pseudo-op are not modified before this branch op, as + // they are tested here. + + if (instr->arch_opcode() == kRiscvTst) { + cc = FlagsConditionToConditionTst(condition); + __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg)); + } else if (instr->arch_opcode() == kRiscvAdd64 || + instr->arch_opcode() == kRiscvSub64) { + cc = FlagsConditionToConditionOvf(condition); + __ Sra64(kScratchReg, i.OutputRegister(), 32); + __ Sra64(kScratchReg2, i.OutputRegister(), 31); + __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg)); + } else if (instr->arch_opcode() == kRiscvAddOvf64 || + instr->arch_opcode() == kRiscvSubOvf64) { + switch (condition) { + // Overflow occurs if overflow register is negative + case kOverflow: + __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg)); + break; + case kNotOverflow: + __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg)); + break; + default: + UNSUPPORTED_COND(instr->arch_opcode(), condition); + } + } else if (instr->arch_opcode() == kRiscvMulOvf32) { + // Overflow occurs if overflow register is not zero + switch (condition) { + case kOverflow: + __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg)); + break; + case kNotOverflow: + __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg)); + break; + default: + UNSUPPORTED_COND(kRiscvMulOvf32, condition); + } + } else if (instr->arch_opcode() == kRiscvCmp) { + cc = FlagsConditionToConditionCmp(condition); + __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); + } else if (instr->arch_opcode() == kRiscvCmpZero) { + cc = FlagsConditionToConditionCmp(condition); + if (i.InputOrZeroRegister(0) == zero_reg && IsInludeEqual(cc)) { + __ Branch(tlabel); + } else if (i.InputOrZeroRegister(0) != zero_reg) { + __ Branch(tlabel, cc, i.InputRegister(0), Operand(zero_reg)); + } + } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) { + cc = FlagsConditionToConditionCmp(condition); + Register lhs_register = sp; + uint32_t offset; + if (gen->ShouldApplyOffsetToStackCheck(instr, &offset)) { + lhs_register = i.TempRegister(0); + __ Sub64(lhs_register, sp, offset); + } + __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0))); + } else if (instr->arch_opcode() == kRiscvCmpS || + instr->arch_opcode() == kRiscvCmpD) { + bool predicate; + FlagsConditionToConditionCmpFPU(&predicate, condition); + // floating-point compare result is set in kScratchReg + if (predicate) { + __ BranchTrueF(kScratchReg, tlabel); + } else { + __ BranchFalseF(kScratchReg, tlabel); + } + } else { + PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n", + instr->arch_opcode()); + 
UNIMPLEMENTED();
+  }
+  if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
+#undef __
+#define __ tasm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+
+  AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+                         branch->fallthru);
+}
+
+#undef UNSUPPORTED_COND
+
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+                                            BranchInfo* branch) {
+  AssembleArchBranch(instr, branch);
+}
+
+void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
+    RpoNumber target) {
+  __ Branch(GetLabel(target));
+}
+
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+        : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
+    void Generate() final {
+      RiscvOperandConverter i(gen_, instr_);
+      TrapId trap_id =
+          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
+      GenerateCallToTrap(trap_id);
+    }
+
+   private:
+    void GenerateCallToTrap(TrapId trap_id) {
+      if (trap_id == TrapId::kInvalid) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        // We use the context register as the scratch register, because we do
+        // not have a context here.
+        __ PrepareCallCFunction(0, 0, cp);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+        __ LeaveFrame(StackFrame::WASM);
+        auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+        int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
+        pop_count += (pop_count & 1);  // align
+        __ Drop(pop_count);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        // A direct call to a wasm runtime stub defined in this module.
+        // Just encode the stub index. This will be patched when the code
+        // is added to the native module and copied into wasm code space.
+        __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+        ReferenceMap* reference_map =
+            gen_->zone()->New<ReferenceMap>(gen_->zone());
+        gen_->RecordSafepoint(reference_map);
+        if (FLAG_debug_code) {
+          __ stop();
+        }
+      }
+    }
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  auto ool = zone()->New<OutOfLineTrap>(this, instr);
+  Label* tlabel = ool->entry();
+  AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+}
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  RiscvOperandConverter i(this, instr);
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  DCHECK_NE(0u, instr->OutputCount());
+  Register result = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = kNoCondition;
+  // RISC-V does not have condition code flags, so compare and branch are
+  // implemented differently than on the other arch's. The compare operations
+  // emit riscv64 pseudo-instructions, which are checked and handled here.
+
+  if (instr->arch_opcode() == kRiscvTst) {
+    cc = FlagsConditionToConditionTst(condition);
+    if (cc == eq) {
+      __ Sltu(result, kScratchReg, 1);
+    } else {
+      __ Sltu(result, zero_reg, kScratchReg);
+    }
+    return;
+  } else if (instr->arch_opcode() == kRiscvAdd64 ||
+             instr->arch_opcode() == kRiscvSub64) {
+    cc = FlagsConditionToConditionOvf(condition);
+    // Check for overflow creates 1 or 0 for result.
+    __ Srl64(kScratchReg, i.OutputRegister(), 63);
+    __ Srl32(kScratchReg2, i.OutputRegister(), 31);
+    __ Xor(result, kScratchReg, kScratchReg2);
+    if (cc == eq)  // Toggle result for not overflow.
+      __ Xor(result, result, 1);
+    return;
+  } else if (instr->arch_opcode() == kRiscvAddOvf64 ||
+             instr->arch_opcode() == kRiscvSubOvf64) {
+    // Overflow occurs if overflow register is negative
+    __ Slt(result, kScratchReg, zero_reg);
+  } else if (instr->arch_opcode() == kRiscvMulOvf32) {
+    // Overflow occurs if overflow register is not zero
+    __ Sgtu(result, kScratchReg, zero_reg);
+  } else if (instr->arch_opcode() == kRiscvCmp) {
+    cc = FlagsConditionToConditionCmp(condition);
+    switch (cc) {
+      case eq:
+      case ne: {
+        Register left = i.InputOrZeroRegister(0);
+        Operand right = i.InputOperand(1);
+        if (instr->InputAt(1)->IsImmediate()) {
+          if (is_int12(-right.immediate())) {
+            if (right.immediate() == 0) {
+              if (cc == eq) {
+                __ Sltu(result, left, 1);
+              } else {
+                __ Sltu(result, zero_reg, left);
+              }
+            } else {
+              __ Add64(result, left, Operand(-right.immediate()));
+              if (cc == eq) {
+                __ Sltu(result, result, 1);
+              } else {
+                __ Sltu(result, zero_reg, result);
+              }
+            }
+          } else {
+            if (is_uint12(right.immediate())) {
+              __ Xor(result, left, right);
+            } else {
+              __ li(kScratchReg, right);
+              __ Xor(result, left, kScratchReg);
+            }
+            if (cc == eq) {
+              __ Sltu(result, result, 1);
+            } else {
+              __ Sltu(result, zero_reg, result);
+            }
+          }
+        } else {
+          __ Xor(result, left, right);
+          if (cc == eq) {
+            __ Sltu(result, result, 1);
+          } else {
+            __ Sltu(result, zero_reg, result);
+          }
+        }
+      } break;
+      case lt:
+      case ge: {
+        Register left = i.InputOrZeroRegister(0);
+        Operand right = i.InputOperand(1);
+        __ Slt(result, left, right);
+        if (cc == ge) {
+          __ Xor(result, result, 1);
+        }
+      } break;
+      case gt:
+      case le: {
+        Register left = i.InputOrZeroRegister(1);
+        Operand right = i.InputOperand(0);
+        __ Slt(result, left, right);
+        if (cc == le) {
+          __ Xor(result, result, 1);
+        }
+      } break;
+      case Uless:
+      case Ugreater_equal: {
+        Register left = i.InputOrZeroRegister(0);
i.InputOrZeroRegister(0); + Operand right = i.InputOperand(1); + __ Sltu(result, left, right); + if (cc == Ugreater_equal) { + __ Xor(result, result, 1); + } + } break; + case Ugreater: + case Uless_equal: { + Register left = i.InputRegister(1); + Operand right = i.InputOperand(0); + __ Sltu(result, left, right); + if (cc == Uless_equal) { + __ Xor(result, result, 1); + } + } break; + default: + UNREACHABLE(); + } + return; + } else if (instr->arch_opcode() == kRiscvCmpZero) { + cc = FlagsConditionToConditionCmp(condition); + switch (cc) { + case eq: { + Register left = i.InputOrZeroRegister(0); + __ Sltu(result, left, 1); + break; + } + case ne: { + Register left = i.InputOrZeroRegister(0); + __ Sltu(result, zero_reg, left); + break; + } + case lt: + case ge: { + Register left = i.InputOrZeroRegister(0); + Operand right = Operand(zero_reg); + __ Slt(result, left, right); + if (cc == ge) { + __ Xor(result, result, 1); + } + } break; + case gt: + case le: { + Operand left = i.InputOperand(0); + __ Slt(result, zero_reg, left); + if (cc == le) { + __ Xor(result, result, 1); + } + } break; + case Uless: + case Ugreater_equal: { + Register left = i.InputOrZeroRegister(0); + Operand right = Operand(zero_reg); + __ Sltu(result, left, right); + if (cc == Ugreater_equal) { + __ Xor(result, result, 1); + } + } break; + case Ugreater: + case Uless_equal: { + Register left = zero_reg; + Operand right = i.InputOperand(0); + __ Sltu(result, left, right); + if (cc == Uless_equal) { + __ Xor(result, result, 1); + } + } break; + default: + UNREACHABLE(); + } + return; + } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) { + cc = FlagsConditionToConditionCmp(condition); + Register lhs_register = sp; + uint32_t offset; + if (ShouldApplyOffsetToStackCheck(instr, &offset)) { + lhs_register = i.TempRegister(0); + __ Sub64(lhs_register, sp, offset); + } + __ Sgtu(result, lhs_register, Operand(i.InputRegister(0))); + return; + } else if (instr->arch_opcode() == kRiscvCmpD || + instr->arch_opcode() == kRiscvCmpS) { + FPURegister left = i.InputOrZeroDoubleRegister(0); + FPURegister right = i.InputOrZeroDoubleRegister(1); + if ((instr->arch_opcode() == kRiscvCmpD) && + (left == kDoubleRegZero || right == kDoubleRegZero) && + !__ IsDoubleZeroRegSet()) { + __ LoadFPRImmediate(kDoubleRegZero, 0.0); + } else if ((instr->arch_opcode() == kRiscvCmpS) && + (left == kDoubleRegZero || right == kDoubleRegZero) && + !__ IsSingleZeroRegSet()) { + __ LoadFPRImmediate(kDoubleRegZero, 0.0f); + } + bool predicate; + FlagsConditionToConditionCmpFPU(&predicate, condition); + // RISCV compare returns 0 or 1, do nothing when predicate; otherwise + // toggle kScratchReg (i.e., 0 -> 1, 1 -> 0) + if (predicate) { + __ Move(result, kScratchReg); + } else { + __ Xor(result, kScratchReg, 1); + } + return; + } else { + PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n", + instr->arch_opcode()); + TRACE_UNIMPL(); + UNIMPLEMENTED(); + } +} + +void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) { + RiscvOperandConverter i(this, instr); + Register input = i.InputRegister(0); + std::vector> cases; + for (size_t index = 2; index < instr->InputCount(); index += 2) { + cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))}); + } + AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(), + cases.data() + cases.size()); +} + +void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { + RiscvOperandConverter i(this, instr); + Register input = i.InputRegister(0); + 
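AssembleArchBoolean above materializes comparison results as 0/1 values because there is no flags register to read back. The sketch below mirrors two of those idioms with ordinary integer operations, plain uint64_t/int64_t standing in for the Xor/Sltu/Slt macro-assembler calls:

#include <cstdint>
#include <cstdio>

// eq: xor the operands, then "is the difference unsigned-less-than 1?".
static uint64_t MaterializeEq(uint64_t lhs, uint64_t rhs) {
  uint64_t diff = lhs ^ rhs;   // Xor(result, left, right)
  return (diff < 1) ? 1 : 0;   // Sltu(result, result, 1)
}

// ge: compute lt with a signed set-less-than, then flip the bit.
static uint64_t MaterializeGe(int64_t lhs, int64_t rhs) {
  uint64_t lt = (lhs < rhs) ? 1 : 0;  // Slt(result, left, right)
  return lt ^ 1;                      // Xor(result, result, 1)
}

int main() {
  std::printf("%d %d\n", static_cast<int>(MaterializeEq(7, 7)),  // 1
              static_cast<int>(MaterializeGe(-3, 2)));           // 0
}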
size_t const case_count = instr->InputCount() - 2; + + __ Branch(GetLabel(i.InputRpo(1)), Ugreater_equal, input, + Operand(case_count)); + __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) { + return GetLabel(i.InputRpo(index + 2)); + }); +} + +void CodeGenerator::FinishFrame(Frame* frame) { + auto call_descriptor = linkage()->GetIncomingDescriptor(); + + const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); + if (!saves_fpu.is_empty()) { + int count = saves_fpu.Count(); + DCHECK_EQ(kNumCalleeSavedFPU, count); + frame->AllocateSavedCalleeRegisterSlots(count * + (kDoubleSize / kSystemPointerSize)); + } + + const RegList saves = call_descriptor->CalleeSavedRegisters(); + if (!saves.is_empty()) { + int count = saves.Count(); + frame->AllocateSavedCalleeRegisterSlots(count); + } +} + +void CodeGenerator::AssembleConstructFrame() { + auto call_descriptor = linkage()->GetIncomingDescriptor(); + + if (frame_access_state()->has_frame()) { + if (call_descriptor->IsCFunctionCall()) { + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ StubPrologue(StackFrame::C_WASM_ENTRY); + // Reserve stack space for saving the c_entry_fp later. + __ Sub64(sp, sp, Operand(kSystemPointerSize)); + } else { + __ Push(ra, fp); + __ Move(fp, sp); + } + } else if (call_descriptor->IsJSFunctionCall()) { + __ Prologue(); + } else { + __ StubPrologue(info()->GetOutputStackFrameType()); + if (call_descriptor->IsWasmFunctionCall() || + call_descriptor->IsWasmImportWrapper() || + call_descriptor->IsWasmCapiFunction()) { + __ Push(kWasmInstanceRegister); + } + if (call_descriptor->IsWasmCapiFunction()) { + // Reserve space for saving the PC later. + __ Sub64(sp, sp, Operand(kSystemPointerSize)); + } + } + } + + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); + + if (info()->is_osr()) { + // TurboFan OSR-compiled functions cannot be entered directly. + __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction); + + // Unoptimized code jumps directly to this entrypoint while the unoptimized + // frame is still on the stack. Optimized code uses OSR values directly from + // the unoptimized frame. Thus, all that needs to be done is to allocate the + // remaining stack slots. + __ RecordComment("-- OSR entrypoint --"); + osr_pc_offset_ = __ pc_offset(); + required_slots -= osr_helper()->UnoptimizedFrameSlots(); + } + + const RegList saves = call_descriptor->CalleeSavedRegisters(); + const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); + + if (required_slots > 0) { + DCHECK(frame_access_state()->has_frame()); + if (info()->IsWasm() && required_slots > 128) { + // For WebAssembly functions with big frames we have to do the stack + // overflow check before we construct the frame. Otherwise we may not + // have enough space on the stack to call the runtime for the stack + // overflow. + Label done; + + // If the frame is bigger than the stack, we throw the stack overflow + // exception unconditionally. Thereby we can avoid the integer overflow + // check in the condition code. 
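For the large-WebAssembly-frame case, the generated code loads the real stack limit from the instance, adds the frame size, and compares against sp; the check is only emitted when the frame is smaller than the whole stack, which also keeps that addition from overflowing. A standalone sketch of the comparison, assuming plain uintptr_t values:

#include <cstdint>
#include <cstdio>

// The frame fits if sp is still at or above (real stack limit + frame bytes);
// otherwise the stack-overflow stub is called. The caller must guarantee
// frame_bytes is smaller than the whole stack so the addition cannot wrap.
static bool FrameFits(uintptr_t sp, uintptr_t real_stack_limit,
                      uintptr_t frame_bytes) {
  return sp >= real_stack_limit + frame_bytes;  // BranchShort(&done, uge, ...)
}

int main() {
  std::printf("%d\n", static_cast<int>(FrameFits(0x8000, 0x1000, 0x2000)));  // 1
  std::printf("%d\n", static_cast<int>(FrameFits(0x2800, 0x1000, 0x2000)));  // 0
}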
+      if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+        __ Ld(
+            kScratchReg,
+            FieldMemOperand(kWasmInstanceRegister,
+                            WasmInstanceObject::kRealStackLimitAddressOffset));
+        __ Ld(kScratchReg, MemOperand(kScratchReg));
+        __ Add64(kScratchReg, kScratchReg,
+                 Operand(required_slots * kSystemPointerSize));
+        __ BranchShort(&done, uge, sp, Operand(kScratchReg));
+      }
+
+      __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+      // We come from WebAssembly, there are no references for the GC.
+      ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
+      RecordSafepoint(reference_map);
+      if (FLAG_debug_code) {
+        __ stop();
+      }
+
+      __ bind(&done);
+    }
+  }
+
+  const int returns = frame()->GetReturnSlotCount();
+
+  // Skip callee-saved and return slots, which are pushed below.
+  required_slots -= saves.Count();
+  required_slots -= saves_fpu.Count();
+  required_slots -= returns;
+  if (required_slots > 0) {
+    __ Sub64(sp, sp, Operand(required_slots * kSystemPointerSize));
+  }
+
+  if (!saves_fpu.is_empty()) {
+    // Save callee-saved FPU registers.
+    __ MultiPushFPU(saves_fpu);
+    DCHECK_EQ(kNumCalleeSavedFPU, saves_fpu.Count());
+  }
+
+  if (!saves.is_empty()) {
+    // Save callee-saved registers.
+    __ MultiPush(saves);
+  }
+
+  if (returns != 0) {
+    // Create space for returns.
+    __ Sub64(sp, sp, Operand(returns * kSystemPointerSize));
+  }
+}
+
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
+  auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+  const int returns = frame()->GetReturnSlotCount();
+  if (returns != 0) {
+    __ Add64(sp, sp, Operand(returns * kSystemPointerSize));
+  }
+
+  // Restore GP registers.
+  const RegList saves = call_descriptor->CalleeSavedRegisters();
+  if (!saves.is_empty()) {
+    __ MultiPop(saves);
+  }
+
+  // Restore FPU registers.
+  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+  if (!saves_fpu.is_empty()) {
+    __ MultiPopFPU(saves_fpu);
+  }
+
+  RiscvOperandConverter g(this, nullptr);
+
+  const int parameter_slots =
+      static_cast<int>(call_descriptor->ParameterSlotCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_slots} == 0.
+  // Check RawMachineAssembler::PopAndReturn.
+  if (parameter_slots != 0) {
+    if (additional_pop_count->IsImmediate()) {
+      DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+    } else if (FLAG_debug_code) {
+      __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
+                g.ToRegister(additional_pop_count),
+                Operand(static_cast<int64_t>(0)));
+    }
+  }
+
+  // Functions with JS linkage have at least one parameter (the receiver).
+  // If {parameter_slots} == 0, it means it is a builtin with
+  // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
+  // itself.
+  const bool drop_jsargs = frame_access_state()->has_frame() &&
+                           call_descriptor->IsJSFunctionCall() &&
+                           parameter_slots != 0;
+
+  if (call_descriptor->IsCFunctionCall()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
+    // Canonicalize JSFunction return sites for now unless they have a variable
+    // number of stack slot pops.
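Further down, when {drop_jsargs} is set, the epilogue pops whichever is larger: the actual argument count read from the frame or the static {parameter_slots}, scaled by the pointer size (the BranchShort/Move pair followed by Sll64/Add64). A sketch of that arithmetic, assuming 8-byte pointers and plain size_t values:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Bytes to pop when leaving a JS-linkage frame: the larger of the dynamic
// argument count and the declared parameter slots, times the pointer size.
static size_t BytesToDrop(size_t dynamic_arg_count, size_t parameter_slots,
                          size_t pointer_size = 8) {
  return std::max(dynamic_arg_count, parameter_slots) * pointer_size;
}

int main() {
  std::printf("%zu\n", BytesToDrop(5, 3));  // 40: caller passed extra arguments
  std::printf("%zu\n", BytesToDrop(1, 3));  // 24: caller passed too few
}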
+ if (additional_pop_count->IsImmediate() && + g.ToConstant(additional_pop_count).ToInt32() == 0) { + if (return_label_.is_bound()) { + __ Branch(&return_label_); + return; + } else { + __ bind(&return_label_); + } + } + if (drop_jsargs) { + // Get the actual argument count + __ Ld(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset)); + } + AssembleDeconstructFrame(); + } + if (drop_jsargs) { + // We must pop all arguments from the stack (including the receiver). This + // number of arguments is given by max(1 + argc_reg, parameter_slots). + if (parameter_slots > 1) { + Label done; + __ li(kScratchReg, parameter_slots); + __ BranchShort(&done, ge, t0, Operand(kScratchReg)); + __ Move(t0, kScratchReg); + __ bind(&done); + } + __ Sll64(t0, t0, kSystemPointerSizeLog2); + __ Add64(sp, sp, t0); + } else if (additional_pop_count->IsImmediate()) { + // it should be a kInt32 or a kInt64 + DCHECK_LE(g.ToConstant(additional_pop_count).type(), Constant::kInt64); + int additional_count = g.ToConstant(additional_pop_count).ToInt32(); + __ Drop(parameter_slots + additional_count); + } else { + Register pop_reg = g.ToRegister(additional_pop_count); + __ Drop(parameter_slots); + __ Sll64(pop_reg, pop_reg, kSystemPointerSizeLog2); + __ Add64(sp, sp, pop_reg); + } + __ Ret(); +} + +void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); } + +void CodeGenerator::PrepareForDeoptimizationExits( + ZoneDeque* exits) { + __ ForceConstantPoolEmissionWithoutJump(); + int total_size = 0; + for (DeoptimizationExit* exit : deoptimization_exits_) { + total_size += (exit->kind() == DeoptimizeKind::kLazy) + ? Deoptimizer::kLazyDeoptExitSize + : Deoptimizer::kEagerDeoptExitSize; + } + + __ CheckTrampolinePoolQuick(total_size); +} + +void CodeGenerator::AssembleMove(InstructionOperand* source, + InstructionOperand* destination) { + RiscvOperandConverter g(this, nullptr); + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + if (source->IsRegister()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + Register src = g.ToRegister(source); + if (destination->IsRegister()) { + __ Move(g.ToRegister(destination), src); + } else { + __ Sd(src, g.ToMemOperand(destination)); + } + } else if (source->IsStackSlot()) { + DCHECK(destination->IsRegister() || destination->IsStackSlot()); + MemOperand src = g.ToMemOperand(source); + if (destination->IsRegister()) { + __ Ld(g.ToRegister(destination), src); + } else { + Register temp = kScratchReg; + __ Ld(temp, src); + __ Sd(temp, g.ToMemOperand(destination)); + } + } else if (source->IsConstant()) { + Constant src = g.ToConstant(source); + if (destination->IsRegister() || destination->IsStackSlot()) { + Register dst = + destination->IsRegister() ? 
g.ToRegister(destination) : kScratchReg;
+      switch (src.type()) {
+        case Constant::kInt32:
+          if (src.ToInt32() == 0 && destination->IsStackSlot()) {
+            dst = zero_reg;
+          } else {
+            __ li(dst, Operand(src.ToInt32()));
+          }
+          break;
+        case Constant::kFloat32:
+          __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
+          break;
+        case Constant::kInt64:
+          if (RelocInfo::IsWasmReference(src.rmode())) {
+            __ li(dst, Operand(src.ToInt64(), src.rmode()));
+          } else {
+            if (src.ToInt64() == 0 && destination->IsStackSlot()) {
+              dst = zero_reg;
+            } else {
+              __ li(dst, Operand(src.ToInt64()));
+            }
+          }
+          break;
+        case Constant::kFloat64:
+          __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
+          break;
+        case Constant::kExternalReference:
+          __ li(dst, src.ToExternalReference());
+          break;
+        case Constant::kDelayedStringConstant:
+          __ li(dst, src.ToDelayedStringConstant());
+          break;
+        case Constant::kHeapObject: {
+          Handle<HeapObject> src_object = src.ToHeapObject();
+          RootIndex index;
+          if (IsMaterializableFromRoot(src_object, &index)) {
+            __ LoadRoot(dst, index);
+          } else {
+            __ li(dst, src_object);
+          }
+          break;
+        }
+        case Constant::kCompressedHeapObject: {
+          Handle<HeapObject> src_object = src.ToHeapObject();
+          RootIndex index;
+          if (IsMaterializableFromRoot(src_object, &index)) {
+            __ LoadRoot(dst, index);
+          } else {
+            __ li(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
+          }
+          break;
+        }
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(titzer): loading RPO numbers
+      }
+      if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
+    } else if (src.type() == Constant::kFloat32) {
+      if (destination->IsFPStackSlot()) {
+        MemOperand dst = g.ToMemOperand(destination);
+        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+          __ Sw(zero_reg, dst);
+        } else {
+          __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
+          __ Sw(kScratchReg, dst);
+        }
+      } else {
+        DCHECK(destination->IsFPRegister());
+        FloatRegister dst = g.ToSingleRegister(destination);
+        __ LoadFPRImmediate(dst, src.ToFloat32());
+      }
+    } else {
+      DCHECK_EQ(Constant::kFloat64, src.type());
+      DoubleRegister dst = destination->IsFPRegister()
+                               ?
g.ToDoubleRegister(destination) + : kScratchDoubleReg; + __ LoadFPRImmediate(dst, src.ToFloat64().value()); + if (destination->IsFPStackSlot()) { + __ StoreDouble(dst, g.ToMemOperand(destination)); + } + } + } else if (source->IsFPRegister()) { + MachineRepresentation rep = LocationOperand::cast(source)->representation(); + if (rep == MachineRepresentation::kSimd128) { + VRegister src = g.ToSimd128Register(source); + if (destination->IsSimd128Register()) { + VRegister dst = g.ToSimd128Register(destination); + __ VU.set(kScratchReg, E8, m1); + __ vmv_vv(dst, src); + } else { + DCHECK(destination->IsSimd128StackSlot()); + __ VU.set(kScratchReg, E8, m1); + MemOperand dst = g.ToMemOperand(destination); + Register dst_r = dst.rm(); + if (dst.offset() != 0) { + dst_r = kScratchReg; + __ Add64(dst_r, dst.rm(), dst.offset()); + } + __ vs(src, dst_r, 0, E8); + } + } else { + FPURegister src = g.ToDoubleRegister(source); + if (destination->IsFPRegister()) { + FPURegister dst = g.ToDoubleRegister(destination); + __ Move(dst, src); + } else { + DCHECK(destination->IsFPStackSlot()); + if (rep == MachineRepresentation::kFloat32) { + __ StoreFloat(src, g.ToMemOperand(destination)); + } else { + DCHECK_EQ(rep, MachineRepresentation::kFloat64); + __ StoreDouble(src, g.ToMemOperand(destination)); + } + } + } + } else if (source->IsFPStackSlot()) { + DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot()); + MemOperand src = g.ToMemOperand(source); + MachineRepresentation rep = LocationOperand::cast(source)->representation(); + if (rep == MachineRepresentation::kSimd128) { + __ VU.set(kScratchReg, E8, m1); + Register src_r = src.rm(); + if (src.offset() != 0) { + src_r = kScratchReg; + __ Add64(src_r, src.rm(), src.offset()); + } + if (destination->IsSimd128Register()) { + __ vl(g.ToSimd128Register(destination), src_r, 0, E8); + } else { + DCHECK(destination->IsSimd128StackSlot()); + VRegister temp = kSimd128ScratchReg; + MemOperand dst = g.ToMemOperand(destination); + Register dst_r = dst.rm(); + if (dst.offset() != 0) { + dst_r = kScratchReg2; + __ Add64(dst_r, dst.rm(), dst.offset()); + } + __ vl(temp, src_r, 0, E8); + __ vs(temp, dst_r, 0, E8); + } + } else { + if (destination->IsFPRegister()) { + if (rep == MachineRepresentation::kFloat32) { + __ LoadFloat(g.ToDoubleRegister(destination), src); + } else { + DCHECK_EQ(rep, MachineRepresentation::kFloat64); + __ LoadDouble(g.ToDoubleRegister(destination), src); + } + } else { + DCHECK(destination->IsFPStackSlot()); + FPURegister temp = kScratchDoubleReg; + if (rep == MachineRepresentation::kFloat32) { + __ LoadFloat(temp, src); + __ StoreFloat(temp, g.ToMemOperand(destination)); + } else { + DCHECK_EQ(rep, MachineRepresentation::kFloat64); + __ LoadDouble(temp, src); + __ StoreDouble(temp, g.ToMemOperand(destination)); + } + } + } + } else { + UNREACHABLE(); + } +} + +void CodeGenerator::AssembleSwap(InstructionOperand* source, + InstructionOperand* destination) { + RiscvOperandConverter g(this, nullptr); + switch (MoveType::InferSwap(source, destination)) { + case MoveType::kRegisterToRegister: + if (source->IsRegister()) { + Register temp = kScratchReg; + Register src = g.ToRegister(source); + Register dst = g.ToRegister(destination); + __ Move(temp, src); + __ Move(src, dst); + __ Move(dst, temp); + } else { + if (source->IsFloatRegister() || source->IsDoubleRegister()) { + FPURegister temp = kScratchDoubleReg; + FPURegister src = g.ToDoubleRegister(source); + FPURegister dst = g.ToDoubleRegister(destination); + __ Move(temp, src); + __ 
Move(src, dst); + __ Move(dst, temp); + } else { + DCHECK(source->IsSimd128Register()); + VRegister src = g.ToDoubleRegister(source).toV(); + VRegister dst = g.ToDoubleRegister(destination).toV(); + VRegister temp = kSimd128ScratchReg; + __ VU.set(kScratchReg, E8, m1); + __ vmv_vv(temp, src); + __ vmv_vv(src, dst); + __ vmv_vv(dst, temp); + } + } + return; + case MoveType::kRegisterToStack: { + MemOperand dst = g.ToMemOperand(destination); + if (source->IsRegister()) { + Register temp = kScratchReg; + Register src = g.ToRegister(source); + __ mv(temp, src); + __ Ld(src, dst); + __ Sd(temp, dst); + } else { + MemOperand dst = g.ToMemOperand(destination); + if (source->IsFloatRegister()) { + DoubleRegister src = g.ToDoubleRegister(source); + DoubleRegister temp = kScratchDoubleReg; + __ fmv_s(temp, src); + __ LoadFloat(src, dst); + __ StoreFloat(temp, dst); + } else if (source->IsDoubleRegister()) { + DoubleRegister src = g.ToDoubleRegister(source); + DoubleRegister temp = kScratchDoubleReg; + __ fmv_d(temp, src); + __ LoadDouble(src, dst); + __ StoreDouble(temp, dst); + } else { + DCHECK(source->IsSimd128Register()); + VRegister src = g.ToDoubleRegister(source).toV(); + VRegister temp = kSimd128ScratchReg; + __ VU.set(kScratchReg, E8, m1); + __ vmv_vv(temp, src); + Register dst_v = dst.rm(); + if (dst.offset() != 0) { + dst_v = kScratchReg2; + __ Add64(dst_v, dst.rm(), Operand(dst.offset())); + } + __ vl(src, dst_v, 0, E8); + __ vs(temp, dst_v, 0, E8); + } + } + return; + } + case MoveType::kStackToStack: { + MemOperand src = g.ToMemOperand(source); + MemOperand dst = g.ToMemOperand(destination); + if (source->IsSimd128StackSlot()) { + __ VU.set(kScratchReg, E8, m1); + Register src_v = src.rm(); + Register dst_v = dst.rm(); + if (src.offset() != 0) { + src_v = kScratchReg; + __ Add64(src_v, src.rm(), Operand(src.offset())); + } + if (dst.offset() != 0) { + dst_v = kScratchReg2; + __ Add64(dst_v, dst.rm(), Operand(dst.offset())); + } + __ vl(kSimd128ScratchReg, src_v, 0, E8); + __ vl(kSimd128ScratchReg2, dst_v, 0, E8); + __ vs(kSimd128ScratchReg, dst_v, 0, E8); + __ vs(kSimd128ScratchReg2, src_v, 0, E8); + } else { + UseScratchRegisterScope scope(tasm()); + Register temp_0 = kScratchReg; + Register temp_1 = kScratchReg2; + __ Ld(temp_0, src); + __ Ld(temp_1, dst); + __ Sd(temp_0, dst); + __ Sd(temp_1, src); + } + return; + } + default: + UNREACHABLE(); + } +} + +void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { + // On 64-bit RISC-V we emit the jump tables inline. + UNREACHABLE(); +} + +#undef ASSEMBLE_ATOMIC_LOAD_INTEGER +#undef ASSEMBLE_ATOMIC_STORE_INTEGER +#undef ASSEMBLE_ATOMIC_BINOP +#undef ASSEMBLE_ATOMIC_BINOP_EXT +#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER +#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT +#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER +#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT +#undef ASSEMBLE_IEEE754_BINOP +#undef ASSEMBLE_IEEE754_UNOP + +#undef TRACE_MSG +#undef TRACE_UNIMPL +#undef __ + +} // namespace compiler +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h new file mode 100644 index 00000000000000..aa0d446d22bc6f --- /dev/null +++ b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h @@ -0,0 +1,434 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
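The kStackToStack path of AssembleSwap above needs two scratch registers: both slots are loaded first and then stored back crosswise, so neither value is lost. The same idea with ordinary pointers, int64_t slots standing in for stack slots and kScratchReg/kScratchReg2:

#include <cstdint>
#include <cstdio>

// Swap two memory slots through two temporaries: load both, store crosswise.
static void SwapSlots(int64_t* src, int64_t* dst) {
  int64_t temp_0 = *src;  // Ld(temp_0, src)
  int64_t temp_1 = *dst;  // Ld(temp_1, dst)
  *dst = temp_0;          // Sd(temp_0, dst)
  *src = temp_1;          // Sd(temp_1, src)
}

int main() {
  int64_t a = 1, b = 2;
  SwapSlots(&a, &b);
  std::printf("%lld %lld\n", (long long)a, (long long)b);  // 2 1
}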
+ +#ifndef V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_ +#define V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_ + +namespace v8 { +namespace internal { +namespace compiler { + +// RISC-V-specific opcodes that specify which assembly sequence to emit. +// Most opcodes specify a single instruction. +#define TARGET_ARCH_OPCODE_LIST(V) \ + V(RiscvAdd32) \ + V(RiscvAdd64) \ + V(RiscvAddOvf64) \ + V(RiscvSub32) \ + V(RiscvSub64) \ + V(RiscvSubOvf64) \ + V(RiscvMul32) \ + V(RiscvMulOvf32) \ + V(RiscvMulHigh32) \ + V(RiscvMulHigh64) \ + V(RiscvMulHighU32) \ + V(RiscvMul64) \ + V(RiscvDiv32) \ + V(RiscvDiv64) \ + V(RiscvDivU32) \ + V(RiscvDivU64) \ + V(RiscvMod32) \ + V(RiscvMod64) \ + V(RiscvModU32) \ + V(RiscvModU64) \ + V(RiscvAnd) \ + V(RiscvAnd32) \ + V(RiscvOr) \ + V(RiscvOr32) \ + V(RiscvNor) \ + V(RiscvNor32) \ + V(RiscvXor) \ + V(RiscvXor32) \ + V(RiscvClz32) \ + V(RiscvShl32) \ + V(RiscvShr32) \ + V(RiscvSar32) \ + V(RiscvZeroExtendWord) \ + V(RiscvSignExtendWord) \ + V(RiscvClz64) \ + V(RiscvCtz32) \ + V(RiscvCtz64) \ + V(RiscvPopcnt32) \ + V(RiscvPopcnt64) \ + V(RiscvShl64) \ + V(RiscvShr64) \ + V(RiscvSar64) \ + V(RiscvRor32) \ + V(RiscvRor64) \ + V(RiscvMov) \ + V(RiscvTst) \ + V(RiscvCmp) \ + V(RiscvCmpZero) \ + V(RiscvCmpS) \ + V(RiscvAddS) \ + V(RiscvSubS) \ + V(RiscvMulS) \ + V(RiscvDivS) \ + V(RiscvModS) \ + V(RiscvAbsS) \ + V(RiscvNegS) \ + V(RiscvSqrtS) \ + V(RiscvMaxS) \ + V(RiscvMinS) \ + V(RiscvCmpD) \ + V(RiscvAddD) \ + V(RiscvSubD) \ + V(RiscvMulD) \ + V(RiscvDivD) \ + V(RiscvModD) \ + V(RiscvAbsD) \ + V(RiscvNegD) \ + V(RiscvSqrtD) \ + V(RiscvMaxD) \ + V(RiscvMinD) \ + V(RiscvFloat64RoundDown) \ + V(RiscvFloat64RoundTruncate) \ + V(RiscvFloat64RoundUp) \ + V(RiscvFloat64RoundTiesEven) \ + V(RiscvFloat32RoundDown) \ + V(RiscvFloat32RoundTruncate) \ + V(RiscvFloat32RoundUp) \ + V(RiscvFloat32RoundTiesEven) \ + V(RiscvCvtSD) \ + V(RiscvCvtDS) \ + V(RiscvTruncWD) \ + V(RiscvRoundWD) \ + V(RiscvFloorWD) \ + V(RiscvCeilWD) \ + V(RiscvTruncWS) \ + V(RiscvRoundWS) \ + V(RiscvFloorWS) \ + V(RiscvCeilWS) \ + V(RiscvTruncLS) \ + V(RiscvTruncLD) \ + V(RiscvTruncUwD) \ + V(RiscvTruncUwS) \ + V(RiscvTruncUlS) \ + V(RiscvTruncUlD) \ + V(RiscvCvtDW) \ + V(RiscvCvtSL) \ + V(RiscvCvtSW) \ + V(RiscvCvtSUw) \ + V(RiscvCvtSUl) \ + V(RiscvCvtDL) \ + V(RiscvCvtDUw) \ + V(RiscvCvtDUl) \ + V(RiscvLb) \ + V(RiscvLbu) \ + V(RiscvSb) \ + V(RiscvLh) \ + V(RiscvUlh) \ + V(RiscvLhu) \ + V(RiscvUlhu) \ + V(RiscvSh) \ + V(RiscvUsh) \ + V(RiscvLd) \ + V(RiscvUld) \ + V(RiscvLw) \ + V(RiscvUlw) \ + V(RiscvLwu) \ + V(RiscvUlwu) \ + V(RiscvSw) \ + V(RiscvUsw) \ + V(RiscvSd) \ + V(RiscvUsd) \ + V(RiscvLoadFloat) \ + V(RiscvULoadFloat) \ + V(RiscvStoreFloat) \ + V(RiscvUStoreFloat) \ + V(RiscvLoadDouble) \ + V(RiscvULoadDouble) \ + V(RiscvStoreDouble) \ + V(RiscvUStoreDouble) \ + V(RiscvBitcastDL) \ + V(RiscvBitcastLD) \ + V(RiscvBitcastInt32ToFloat32) \ + V(RiscvBitcastFloat32ToInt32) \ + V(RiscvFloat64ExtractLowWord32) \ + V(RiscvFloat64ExtractHighWord32) \ + V(RiscvFloat64InsertLowWord32) \ + V(RiscvFloat64InsertHighWord32) \ + V(RiscvFloat32Max) \ + V(RiscvFloat64Max) \ + V(RiscvFloat32Min) \ + V(RiscvFloat64Min) \ + V(RiscvFloat64SilenceNaN) \ + V(RiscvPush) \ + V(RiscvPeek) \ + V(RiscvByteSwap64) \ + V(RiscvByteSwap32) \ + V(RiscvStoreToStackSlot) \ + V(RiscvStackClaim) \ + V(RiscvSignExtendByte) \ + V(RiscvSignExtendShort) \ + V(RiscvSync) \ + V(RiscvAssertEqual) \ + V(RiscvS128Const) \ + V(RiscvS128Zero) \ + V(RiscvS128AllOnes) \ + V(RiscvI32x4Splat) \ + V(RiscvI32x4ExtractLane) \ 
+ V(RiscvI32x4ReplaceLane) \ + V(RiscvI32x4Add) \ + V(RiscvI32x4Sub) \ + V(RiscvF64x2Abs) \ + V(RiscvF64x2Neg) \ + V(RiscvF32x4Splat) \ + V(RiscvF32x4ExtractLane) \ + V(RiscvF32x4ReplaceLane) \ + V(RiscvF32x4SConvertI32x4) \ + V(RiscvF32x4UConvertI32x4) \ + V(RiscvI64x2SConvertI32x4Low) \ + V(RiscvI64x2SConvertI32x4High) \ + V(RiscvI64x2UConvertI32x4Low) \ + V(RiscvI64x2UConvertI32x4High) \ + V(RiscvI32x4Mul) \ + V(RiscvI32x4MaxS) \ + V(RiscvI32x4MinS) \ + V(RiscvI32x4Eq) \ + V(RiscvI32x4Ne) \ + V(RiscvI32x4Shl) \ + V(RiscvI32x4ShrS) \ + V(RiscvI32x4ShrU) \ + V(RiscvI32x4MaxU) \ + V(RiscvI32x4MinU) \ + V(RiscvI64x2GtS) \ + V(RiscvI64x2GeS) \ + V(RiscvI64x2Eq) \ + V(RiscvI64x2Ne) \ + V(RiscvF64x2Sqrt) \ + V(RiscvF64x2Add) \ + V(RiscvF64x2Sub) \ + V(RiscvF64x2Mul) \ + V(RiscvF64x2Div) \ + V(RiscvF64x2Min) \ + V(RiscvF64x2Max) \ + V(RiscvF64x2ConvertLowI32x4S) \ + V(RiscvF64x2ConvertLowI32x4U) \ + V(RiscvF64x2PromoteLowF32x4) \ + V(RiscvF64x2Eq) \ + V(RiscvF64x2Ne) \ + V(RiscvF64x2Lt) \ + V(RiscvF64x2Le) \ + V(RiscvF64x2Splat) \ + V(RiscvF64x2ExtractLane) \ + V(RiscvF64x2ReplaceLane) \ + V(RiscvF64x2Pmin) \ + V(RiscvF64x2Pmax) \ + V(RiscvF64x2Ceil) \ + V(RiscvF64x2Floor) \ + V(RiscvF64x2Trunc) \ + V(RiscvF64x2NearestInt) \ + V(RiscvI64x2Splat) \ + V(RiscvI64x2ExtractLane) \ + V(RiscvI64x2ReplaceLane) \ + V(RiscvI64x2Add) \ + V(RiscvI64x2Sub) \ + V(RiscvI64x2Mul) \ + V(RiscvI64x2Abs) \ + V(RiscvI64x2Neg) \ + V(RiscvI64x2Shl) \ + V(RiscvI64x2ShrS) \ + V(RiscvI64x2ShrU) \ + V(RiscvI64x2BitMask) \ + V(RiscvF32x4Abs) \ + V(RiscvF32x4Neg) \ + V(RiscvF32x4Sqrt) \ + V(RiscvF32x4RecipApprox) \ + V(RiscvF32x4RecipSqrtApprox) \ + V(RiscvF32x4Qfma) \ + V(RiscvF32x4Qfms) \ + V(RiscvF64x2Qfma) \ + V(RiscvF64x2Qfms) \ + V(RiscvF32x4Add) \ + V(RiscvF32x4Sub) \ + V(RiscvF32x4Mul) \ + V(RiscvF32x4Div) \ + V(RiscvF32x4Max) \ + V(RiscvF32x4Min) \ + V(RiscvF32x4Eq) \ + V(RiscvF32x4Ne) \ + V(RiscvF32x4Lt) \ + V(RiscvF32x4Le) \ + V(RiscvF32x4Pmin) \ + V(RiscvF32x4Pmax) \ + V(RiscvF32x4DemoteF64x2Zero) \ + V(RiscvF32x4Ceil) \ + V(RiscvF32x4Floor) \ + V(RiscvF32x4Trunc) \ + V(RiscvF32x4NearestInt) \ + V(RiscvI32x4SConvertF32x4) \ + V(RiscvI32x4UConvertF32x4) \ + V(RiscvI32x4Neg) \ + V(RiscvI32x4GtS) \ + V(RiscvI32x4GeS) \ + V(RiscvI32x4GtU) \ + V(RiscvI32x4GeU) \ + V(RiscvI32x4Abs) \ + V(RiscvI32x4BitMask) \ + V(RiscvI32x4TruncSatF64x2SZero) \ + V(RiscvI32x4TruncSatF64x2UZero) \ + V(RiscvI16x8Splat) \ + V(RiscvI16x8ExtractLaneU) \ + V(RiscvI16x8ExtractLaneS) \ + V(RiscvI16x8ReplaceLane) \ + V(RiscvI16x8Neg) \ + V(RiscvI16x8Shl) \ + V(RiscvI16x8ShrS) \ + V(RiscvI16x8ShrU) \ + V(RiscvI16x8Add) \ + V(RiscvI16x8AddSatS) \ + V(RiscvI16x8Sub) \ + V(RiscvI16x8SubSatS) \ + V(RiscvI16x8Mul) \ + V(RiscvI16x8MaxS) \ + V(RiscvI16x8MinS) \ + V(RiscvI16x8Eq) \ + V(RiscvI16x8Ne) \ + V(RiscvI16x8GtS) \ + V(RiscvI16x8GeS) \ + V(RiscvI16x8AddSatU) \ + V(RiscvI16x8SubSatU) \ + V(RiscvI16x8MaxU) \ + V(RiscvI16x8MinU) \ + V(RiscvI16x8GtU) \ + V(RiscvI16x8GeU) \ + V(RiscvI16x8RoundingAverageU) \ + V(RiscvI16x8Q15MulRSatS) \ + V(RiscvI16x8Abs) \ + V(RiscvI16x8BitMask) \ + V(RiscvI8x16Splat) \ + V(RiscvI8x16ExtractLaneU) \ + V(RiscvI8x16ExtractLaneS) \ + V(RiscvI8x16ReplaceLane) \ + V(RiscvI8x16Neg) \ + V(RiscvI8x16Shl) \ + V(RiscvI8x16ShrS) \ + V(RiscvI8x16Add) \ + V(RiscvI8x16AddSatS) \ + V(RiscvI8x16Sub) \ + V(RiscvI8x16SubSatS) \ + V(RiscvI8x16MaxS) \ + V(RiscvI8x16MinS) \ + V(RiscvI8x16Eq) \ + V(RiscvI8x16Ne) \ + V(RiscvI8x16GtS) \ + V(RiscvI8x16GeS) \ + V(RiscvI8x16ShrU) \ + V(RiscvI8x16AddSatU) \ + V(RiscvI8x16SubSatU) \ + 
V(RiscvI8x16MaxU) \ + V(RiscvI8x16MinU) \ + V(RiscvI8x16GtU) \ + V(RiscvI8x16GeU) \ + V(RiscvI8x16RoundingAverageU) \ + V(RiscvI8x16Abs) \ + V(RiscvI8x16BitMask) \ + V(RiscvI8x16Popcnt) \ + V(RiscvS128And) \ + V(RiscvS128Or) \ + V(RiscvS128Xor) \ + V(RiscvS128Not) \ + V(RiscvS128Select) \ + V(RiscvS128AndNot) \ + V(RiscvS128Load64Zero) \ + V(RiscvS128Load32Zero) \ + V(RiscvI32x4AllTrue) \ + V(RiscvI16x8AllTrue) \ + V(RiscvV128AnyTrue) \ + V(RiscvI8x16AllTrue) \ + V(RiscvI64x2AllTrue) \ + V(RiscvS32x4InterleaveRight) \ + V(RiscvS32x4InterleaveLeft) \ + V(RiscvS32x4PackEven) \ + V(RiscvS32x4PackOdd) \ + V(RiscvS32x4InterleaveEven) \ + V(RiscvS32x4InterleaveOdd) \ + V(RiscvS32x4Shuffle) \ + V(RiscvS16x8InterleaveRight) \ + V(RiscvS16x8InterleaveLeft) \ + V(RiscvS16x8PackEven) \ + V(RiscvS16x8PackOdd) \ + V(RiscvS16x8InterleaveEven) \ + V(RiscvS16x8InterleaveOdd) \ + V(RiscvS16x4Reverse) \ + V(RiscvS16x2Reverse) \ + V(RiscvS8x16InterleaveRight) \ + V(RiscvS8x16InterleaveLeft) \ + V(RiscvS8x16PackEven) \ + V(RiscvS8x16PackOdd) \ + V(RiscvS8x16InterleaveEven) \ + V(RiscvS8x16InterleaveOdd) \ + V(RiscvI8x16Shuffle) \ + V(RiscvS8x16Concat) \ + V(RiscvS8x8Reverse) \ + V(RiscvS8x4Reverse) \ + V(RiscvS8x2Reverse) \ + V(RiscvS128LoadSplat) \ + V(RiscvS128Load64ExtendS) \ + V(RiscvS128Load64ExtendU) \ + V(RiscvS128LoadLane) \ + V(RiscvS128StoreLane) \ + V(RiscvRvvLd) \ + V(RiscvRvvSt) \ + V(RiscvI32x4SConvertI16x8Low) \ + V(RiscvI32x4SConvertI16x8High) \ + V(RiscvI32x4UConvertI16x8Low) \ + V(RiscvI32x4UConvertI16x8High) \ + V(RiscvI16x8SConvertI8x16Low) \ + V(RiscvI16x8SConvertI8x16High) \ + V(RiscvI16x8SConvertI32x4) \ + V(RiscvI16x8UConvertI32x4) \ + V(RiscvI16x8UConvertI8x16Low) \ + V(RiscvI16x8UConvertI8x16High) \ + V(RiscvI8x16SConvertI16x8) \ + V(RiscvI8x16UConvertI16x8) \ + V(RiscvVwmul) \ + V(RiscvVwmulu) \ + V(RiscvVmvSx) \ + V(RiscvVcompress) \ + V(RiscvVaddVv) \ + V(RiscvVwadd) \ + V(RiscvVwaddu) \ + V(RiscvVrgather) \ + V(RiscvVslidedown) \ + V(RiscvWord64AtomicLoadUint64) \ + V(RiscvWord64AtomicStoreWord64) \ + V(RiscvWord64AtomicAddUint64) \ + V(RiscvWord64AtomicSubUint64) \ + V(RiscvWord64AtomicAndUint64) \ + V(RiscvWord64AtomicOrUint64) \ + V(RiscvWord64AtomicXorUint64) \ + V(RiscvWord64AtomicExchangeUint64) \ + V(RiscvWord64AtomicCompareExchangeUint64) \ + V(RiscvStoreCompressTagged) \ + V(RiscvLoadDecompressTaggedSigned) \ + V(RiscvLoadDecompressTaggedPointer) \ + V(RiscvLoadDecompressAnyTagged) + +// Addressing modes represent the "shape" of inputs to an instruction. +// Many instructions support multiple addressing modes. Addressing modes +// are encoded into the InstructionCode of the instruction and tell the +// code generator after register allocation which assembler method to call. +// +// We use the following local notation for addressing modes: +// +// R = register +// O = register or stack slot +// D = double register +// I = immediate (handle, external, int32) +// MRI = [register + immediate] +// MRR = [register + register] +// Root = [kRootregister + immediate] +// TODO(plind): Add the new r6 address modes. 
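The opcode list above is an X-macro: every backend-specific opcode is listed once as V(Name), and other headers expand the list with different definitions of V to generate enums, name tables, and switch cases that can never drift apart. A minimal sketch of the pattern with hypothetical MyOpcode/MY_OPCODE_LIST names (the real expansion happens in the architecture-independent instruction headers):

#include <cstdio>

// The same list drives both the enum and the string table.
#define MY_OPCODE_LIST(V) \
  V(RiscvAdd32)           \
  V(RiscvSub32)           \
  V(RiscvCmp)

enum MyOpcode {
#define DECLARE(Name) k##Name,
  MY_OPCODE_LIST(DECLARE)
#undef DECLARE
};

static const char* const kMyOpcodeNames[] = {
#define NAME(Name) #Name,
    MY_OPCODE_LIST(NAME)
#undef NAME
};

int main() {
  std::printf("%s\n", kMyOpcodeNames[kRiscvCmp]);  // prints "RiscvCmp"
}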
+#define TARGET_ADDRESSING_MODE_LIST(V) \ + V(MRI) /* [%r0 + K] */ \ + V(MRR) /* [%r0 + %r1] */ \ + V(Root) /* [root + k] */ + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_ diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc new file mode 100644 index 00000000000000..23e06507d9d84c --- /dev/null +++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc @@ -0,0 +1,1561 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/codegen/macro-assembler.h" +#include "src/compiler/backend/instruction-scheduler.h" + +namespace v8 { +namespace internal { +namespace compiler { + +bool InstructionScheduler::SchedulerSupported() { return true; } + +int InstructionScheduler::GetTargetInstructionFlags( + const Instruction* instr) const { + switch (instr->arch_opcode()) { + case kRiscvAbsD: + case kRiscvAbsS: + case kRiscvAdd32: + case kRiscvAddD: + case kRiscvAddS: + case kRiscvAnd: + case kRiscvAnd32: + case kRiscvAssertEqual: + case kRiscvBitcastDL: + case kRiscvBitcastLD: + case kRiscvBitcastInt32ToFloat32: + case kRiscvBitcastFloat32ToInt32: + case kRiscvByteSwap32: + case kRiscvByteSwap64: + case kRiscvCeilWD: + case kRiscvCeilWS: + case kRiscvClz32: + case kRiscvCmp: + case kRiscvCmpZero: + case kRiscvCmpD: + case kRiscvCmpS: + case kRiscvCtz32: + case kRiscvCvtDL: + case kRiscvCvtDS: + case kRiscvCvtDUl: + case kRiscvCvtDUw: + case kRiscvCvtDW: + case kRiscvCvtSD: + case kRiscvCvtSL: + case kRiscvCvtSUl: + case kRiscvCvtSUw: + case kRiscvCvtSW: + case kRiscvMulHigh64: + case kRiscvMulHighU32: + case kRiscvAdd64: + case kRiscvAddOvf64: + case kRiscvClz64: + case kRiscvCtz64: + case kRiscvDiv64: + case kRiscvDivU64: + case kRiscvZeroExtendWord: + case kRiscvSignExtendWord: + case kRiscvDiv32: + case kRiscvDivD: + case kRiscvDivS: + case kRiscvDivU32: + case kRiscvMod64: + case kRiscvModU64: + case kRiscvMul64: + case kRiscvPopcnt64: + case kRiscvRor64: + case kRiscvSar64: + case kRiscvShl64: + case kRiscvShr64: + case kRiscvSub64: + case kRiscvSubOvf64: + case kRiscvF64x2Abs: + case kRiscvF64x2Neg: + case kRiscvF64x2Sqrt: + case kRiscvF64x2Add: + case kRiscvF64x2Sub: + case kRiscvF64x2Mul: + case kRiscvF64x2Div: + case kRiscvF64x2Min: + case kRiscvF64x2Max: + case kRiscvF64x2Eq: + case kRiscvF64x2Ne: + case kRiscvF64x2Lt: + case kRiscvF64x2Le: + case kRiscvF64x2Pmin: + case kRiscvF64x2Pmax: + case kRiscvF64x2ConvertLowI32x4S: + case kRiscvF64x2ConvertLowI32x4U: + case kRiscvF64x2PromoteLowF32x4: + case kRiscvF64x2Ceil: + case kRiscvF64x2Floor: + case kRiscvF64x2Trunc: + case kRiscvF64x2NearestInt: + case kRiscvI64x2Splat: + case kRiscvI64x2ExtractLane: + case kRiscvI64x2ReplaceLane: + case kRiscvI64x2Add: + case kRiscvI64x2Sub: + case kRiscvI64x2Mul: + case kRiscvI64x2Neg: + case kRiscvI64x2Abs: + case kRiscvI64x2Shl: + case kRiscvI64x2ShrS: + case kRiscvI64x2ShrU: + case kRiscvI64x2BitMask: + case kRiscvI64x2GtS: + case kRiscvI64x2GeS: + case kRiscvF32x4Abs: + case kRiscvF32x4Add: + case kRiscvF32x4Eq: + case kRiscvF32x4ExtractLane: + case kRiscvF32x4Lt: + case kRiscvF32x4Le: + case kRiscvF32x4Max: + case kRiscvF32x4Min: + case kRiscvF32x4Mul: + case kRiscvF32x4Div: + case kRiscvF32x4Ne: + case kRiscvF32x4Neg: + case kRiscvF32x4Sqrt: + case 
kRiscvF32x4RecipApprox: + case kRiscvF32x4RecipSqrtApprox: + case kRiscvF64x2Qfma: + case kRiscvF64x2Qfms: + case kRiscvF32x4Qfma: + case kRiscvF32x4Qfms: + case kRiscvF32x4ReplaceLane: + case kRiscvF32x4SConvertI32x4: + case kRiscvF32x4Splat: + case kRiscvF32x4Sub: + case kRiscvF32x4UConvertI32x4: + case kRiscvF32x4Pmin: + case kRiscvF32x4Pmax: + case kRiscvF32x4DemoteF64x2Zero: + case kRiscvF32x4Ceil: + case kRiscvF32x4Floor: + case kRiscvF32x4Trunc: + case kRiscvF32x4NearestInt: + case kRiscvI64x2Eq: + case kRiscvI64x2Ne: + case kRiscvF64x2Splat: + case kRiscvF64x2ExtractLane: + case kRiscvF64x2ReplaceLane: + case kRiscvFloat32Max: + case kRiscvFloat32Min: + case kRiscvFloat32RoundDown: + case kRiscvFloat32RoundTiesEven: + case kRiscvFloat32RoundTruncate: + case kRiscvFloat32RoundUp: + case kRiscvFloat64ExtractLowWord32: + case kRiscvFloat64ExtractHighWord32: + case kRiscvFloat64InsertLowWord32: + case kRiscvFloat64InsertHighWord32: + case kRiscvFloat64Max: + case kRiscvFloat64Min: + case kRiscvFloat64RoundDown: + case kRiscvFloat64RoundTiesEven: + case kRiscvFloat64RoundTruncate: + case kRiscvFloat64RoundUp: + case kRiscvFloat64SilenceNaN: + case kRiscvFloorWD: + case kRiscvFloorWS: + case kRiscvI64x2SConvertI32x4Low: + case kRiscvI64x2SConvertI32x4High: + case kRiscvI64x2UConvertI32x4Low: + case kRiscvI64x2UConvertI32x4High: + case kRiscvI16x8Add: + case kRiscvI16x8AddSatS: + case kRiscvI16x8AddSatU: + case kRiscvI16x8Eq: + case kRiscvI16x8ExtractLaneU: + case kRiscvI16x8ExtractLaneS: + case kRiscvI16x8GeS: + case kRiscvI16x8GeU: + case kRiscvI16x8GtS: + case kRiscvI16x8GtU: + case kRiscvI16x8MaxS: + case kRiscvI16x8MaxU: + case kRiscvI16x8MinS: + case kRiscvI16x8MinU: + case kRiscvI16x8Mul: + case kRiscvI16x8Ne: + case kRiscvI16x8Neg: + case kRiscvI16x8ReplaceLane: + case kRiscvI8x16SConvertI16x8: + case kRiscvI16x8SConvertI32x4: + case kRiscvI16x8SConvertI8x16High: + case kRiscvI16x8SConvertI8x16Low: + case kRiscvI16x8Shl: + case kRiscvI16x8ShrS: + case kRiscvI16x8ShrU: + case kRiscvI32x4TruncSatF64x2SZero: + case kRiscvI32x4TruncSatF64x2UZero: + case kRiscvI16x8Splat: + case kRiscvI16x8Sub: + case kRiscvI16x8SubSatS: + case kRiscvI16x8SubSatU: + case kRiscvI8x16UConvertI16x8: + case kRiscvI16x8UConvertI32x4: + case kRiscvI16x8UConvertI8x16High: + case kRiscvI16x8UConvertI8x16Low: + case kRiscvI16x8RoundingAverageU: + case kRiscvI16x8Q15MulRSatS: + case kRiscvI16x8Abs: + case kRiscvI16x8BitMask: + case kRiscvI32x4Add: + case kRiscvI32x4Eq: + case kRiscvI32x4ExtractLane: + case kRiscvI32x4GeS: + case kRiscvI32x4GeU: + case kRiscvI32x4GtS: + case kRiscvI32x4GtU: + case kRiscvI32x4MaxS: + case kRiscvI32x4MaxU: + case kRiscvI32x4MinS: + case kRiscvI32x4MinU: + case kRiscvI32x4Mul: + case kRiscvI32x4Ne: + case kRiscvI32x4Neg: + case kRiscvI32x4ReplaceLane: + case kRiscvI32x4SConvertF32x4: + case kRiscvI32x4SConvertI16x8High: + case kRiscvI32x4SConvertI16x8Low: + case kRiscvI32x4Shl: + case kRiscvI32x4ShrS: + case kRiscvI32x4ShrU: + case kRiscvI32x4Splat: + case kRiscvI32x4Sub: + case kRiscvI32x4UConvertF32x4: + case kRiscvI32x4UConvertI16x8High: + case kRiscvI32x4UConvertI16x8Low: + case kRiscvI32x4Abs: + case kRiscvI32x4BitMask: + case kRiscvI8x16Add: + case kRiscvI8x16AddSatS: + case kRiscvI8x16AddSatU: + case kRiscvI8x16Eq: + case kRiscvI8x16ExtractLaneU: + case kRiscvI8x16ExtractLaneS: + case kRiscvI8x16GeS: + case kRiscvI8x16GeU: + case kRiscvI8x16GtS: + case kRiscvI8x16GtU: + case kRiscvI8x16MaxS: + case kRiscvI8x16MaxU: + case kRiscvI8x16MinS: + case kRiscvI8x16MinU: + case 
kRiscvI8x16Ne: + case kRiscvI8x16Neg: + case kRiscvI8x16ReplaceLane: + case kRiscvI8x16Shl: + case kRiscvI8x16ShrS: + case kRiscvI8x16ShrU: + case kRiscvI8x16Splat: + case kRiscvI8x16Sub: + case kRiscvI8x16SubSatS: + case kRiscvI8x16SubSatU: + case kRiscvI8x16RoundingAverageU: + case kRiscvI8x16Abs: + case kRiscvI8x16BitMask: + case kRiscvI8x16Popcnt: + case kRiscvMaxD: + case kRiscvMaxS: + case kRiscvMinD: + case kRiscvMinS: + case kRiscvMod32: + case kRiscvModU32: + case kRiscvMov: + case kRiscvMul32: + case kRiscvMulD: + case kRiscvMulHigh32: + case kRiscvMulOvf32: + case kRiscvMulS: + case kRiscvNegD: + case kRiscvNegS: + case kRiscvNor: + case kRiscvNor32: + case kRiscvOr: + case kRiscvOr32: + case kRiscvPopcnt32: + case kRiscvRor32: + case kRiscvRoundWD: + case kRiscvRoundWS: + case kRiscvS128And: + case kRiscvS128Or: + case kRiscvS128Not: + case kRiscvS128Select: + case kRiscvS128AndNot: + case kRiscvS128Xor: + case kRiscvS128Const: + case kRiscvS128Zero: + case kRiscvS128Load32Zero: + case kRiscvS128Load64Zero: + case kRiscvS128AllOnes: + case kRiscvS16x8InterleaveEven: + case kRiscvS16x8InterleaveOdd: + case kRiscvS16x8InterleaveLeft: + case kRiscvS16x8InterleaveRight: + case kRiscvS16x8PackEven: + case kRiscvS16x8PackOdd: + case kRiscvS16x2Reverse: + case kRiscvS16x4Reverse: + case kRiscvI8x16AllTrue: + case kRiscvI32x4AllTrue: + case kRiscvI16x8AllTrue: + case kRiscvV128AnyTrue: + case kRiscvI64x2AllTrue: + case kRiscvS32x4InterleaveEven: + case kRiscvS32x4InterleaveOdd: + case kRiscvS32x4InterleaveLeft: + case kRiscvS32x4InterleaveRight: + case kRiscvS32x4PackEven: + case kRiscvS32x4PackOdd: + case kRiscvS32x4Shuffle: + case kRiscvS8x16Concat: + case kRiscvS8x16InterleaveEven: + case kRiscvS8x16InterleaveOdd: + case kRiscvS8x16InterleaveLeft: + case kRiscvS8x16InterleaveRight: + case kRiscvS8x16PackEven: + case kRiscvS8x16PackOdd: + case kRiscvS8x2Reverse: + case kRiscvS8x4Reverse: + case kRiscvS8x8Reverse: + case kRiscvI8x16Shuffle: + case kRiscvVwmul: + case kRiscvVwmulu: + case kRiscvVmvSx: + case kRiscvVcompress: + case kRiscvVaddVv: + case kRiscvVwadd: + case kRiscvVwaddu: + case kRiscvVrgather: + case kRiscvVslidedown: + case kRiscvSar32: + case kRiscvSignExtendByte: + case kRiscvSignExtendShort: + case kRiscvShl32: + case kRiscvShr32: + case kRiscvSqrtD: + case kRiscvSqrtS: + case kRiscvSub32: + case kRiscvSubD: + case kRiscvSubS: + case kRiscvTruncLD: + case kRiscvTruncLS: + case kRiscvTruncUlD: + case kRiscvTruncUlS: + case kRiscvTruncUwD: + case kRiscvTruncUwS: + case kRiscvTruncWD: + case kRiscvTruncWS: + case kRiscvTst: + case kRiscvXor: + case kRiscvXor32: + return kNoOpcodeFlags; + + case kRiscvLb: + case kRiscvLbu: + case kRiscvLd: + case kRiscvLoadDouble: + case kRiscvLh: + case kRiscvLhu: + case kRiscvLw: + case kRiscvLoadFloat: + case kRiscvLwu: + case kRiscvRvvLd: + case kRiscvPeek: + case kRiscvUld: + case kRiscvULoadDouble: + case kRiscvUlh: + case kRiscvUlhu: + case kRiscvUlw: + case kRiscvUlwu: + case kRiscvULoadFloat: + case kRiscvS128LoadSplat: + case kRiscvS128Load64ExtendU: + case kRiscvS128Load64ExtendS: + case kRiscvS128LoadLane: + case kRiscvWord64AtomicLoadUint64: + case kRiscvLoadDecompressTaggedSigned: + case kRiscvLoadDecompressTaggedPointer: + case kRiscvLoadDecompressAnyTagged: + return kIsLoadOperation; + + case kRiscvModD: + case kRiscvModS: + case kRiscvRvvSt: + case kRiscvPush: + case kRiscvSb: + case kRiscvSd: + case kRiscvStoreDouble: + case kRiscvSh: + case kRiscvStackClaim: + case kRiscvStoreToStackSlot: + case kRiscvSw: + case 
kRiscvStoreFloat: + case kRiscvUsd: + case kRiscvUStoreDouble: + case kRiscvUsh: + case kRiscvUsw: + case kRiscvUStoreFloat: + case kRiscvSync: + case kRiscvWord64AtomicStoreWord64: + case kRiscvWord64AtomicAddUint64: + case kRiscvWord64AtomicSubUint64: + case kRiscvWord64AtomicAndUint64: + case kRiscvWord64AtomicOrUint64: + case kRiscvWord64AtomicXorUint64: + case kRiscvWord64AtomicExchangeUint64: + case kRiscvWord64AtomicCompareExchangeUint64: + case kRiscvStoreCompressTagged: + case kRiscvS128StoreLane: + return kHasSideEffect; + +#define CASE(Name) case k##Name: + COMMON_ARCH_OPCODE_LIST(CASE) +#undef CASE + // Already covered in architecture independent code. + UNREACHABLE(); + } + + UNREACHABLE(); +} + +enum Latency { + BRANCH = 4, // Estimated max. + RINT_S = 4, // Estimated. + RINT_D = 4, // Estimated. + + // TODO(RISCV): remove MULT instructions (MIPS legacy). + MULT = 4, + MULTU = 4, + DMULT = 4, + + MUL32 = 7, + + DIV32 = 50, // Min:11 Max:50 + DIV64 = 50, + DIVU32 = 50, + DIVU64 = 50, + + ABS_S = 4, + ABS_D = 4, + NEG_S = 4, + NEG_D = 4, + ADD_S = 4, + ADD_D = 4, + SUB_S = 4, + SUB_D = 4, + MAX_S = 4, // Estimated. + MIN_S = 4, + MAX_D = 4, // Estimated. + MIN_D = 4, + C_cond_S = 4, + C_cond_D = 4, + MUL_S = 4, + + MADD_S = 4, + MSUB_S = 4, + NMADD_S = 4, + NMSUB_S = 4, + + CABS_cond_S = 4, + CABS_cond_D = 4, + + CVT_D_S = 4, + CVT_PS_PW = 4, + + CVT_S_W = 4, + CVT_S_L = 4, + CVT_D_W = 4, + CVT_D_L = 4, + + CVT_S_D = 4, + + CVT_W_S = 4, + CVT_W_D = 4, + CVT_L_S = 4, + CVT_L_D = 4, + + CEIL_W_S = 4, + CEIL_W_D = 4, + CEIL_L_S = 4, + CEIL_L_D = 4, + + FLOOR_W_S = 4, + FLOOR_W_D = 4, + FLOOR_L_S = 4, + FLOOR_L_D = 4, + + ROUND_W_S = 4, + ROUND_W_D = 4, + ROUND_L_S = 4, + ROUND_L_D = 4, + + TRUNC_W_S = 4, + TRUNC_W_D = 4, + TRUNC_L_S = 4, + TRUNC_L_D = 4, + + MOV_S = 4, + MOV_D = 4, + + MOVF_S = 4, + MOVF_D = 4, + + MOVN_S = 4, + MOVN_D = 4, + + MOVT_S = 4, + MOVT_D = 4, + + MOVZ_S = 4, + MOVZ_D = 4, + + MUL_D = 5, + MADD_D = 5, + MSUB_D = 5, + NMADD_D = 5, + NMSUB_D = 5, + + RECIP_S = 13, + RECIP_D = 26, + + RSQRT_S = 17, + RSQRT_D = 36, + + DIV_S = 17, + SQRT_S = 17, + + DIV_D = 32, + SQRT_D = 32, + + MOVT_FREG = 4, + MOVT_HIGH_FREG = 4, + MOVT_DREG = 4, + LOAD_FLOAT = 4, + LOAD_DOUBLE = 4, + + MOVF_FREG = 1, + MOVF_HIGH_FREG = 1, + MOVF_HIGH_DREG = 1, + MOVF_HIGH = 1, + MOVF_LOW = 1, + STORE_FLOAT = 1, + STORE_DOUBLE = 1, +}; + +int Add64Latency(bool is_operand_register = true) { + if (is_operand_register) { + return 1; + } else { + return 2; // Estimated max. + } +} + +int Sub64Latency(bool is_operand_register = true) { + return Add64Latency(is_operand_register); +} + +int AndLatency(bool is_operand_register = true) { + return Add64Latency(is_operand_register); +} + +int OrLatency(bool is_operand_register = true) { + return Add64Latency(is_operand_register); +} + +int NorLatency(bool is_operand_register = true) { + if (is_operand_register) { + return 1; + } else { + return 2; // Estimated max. 
+ } +} + +int XorLatency(bool is_operand_register = true) { + return Add64Latency(is_operand_register); +} + +int Mul32Latency(bool is_operand_register = true) { + if (is_operand_register) { + return Latency::MUL32; + } else { + return Latency::MUL32 + 1; + } +} + +int Mul64Latency(bool is_operand_register = true) { + int latency = Latency::DMULT + Latency::MOVF_LOW; + if (!is_operand_register) { + latency += 1; + } + return latency; +} + +int Mulh32Latency(bool is_operand_register = true) { + int latency = Latency::MULT + Latency::MOVF_HIGH; + if (!is_operand_register) { + latency += 1; + } + return latency; +} + +int Mulhu32Latency(bool is_operand_register = true) { + int latency = Latency::MULTU + Latency::MOVF_HIGH; + if (!is_operand_register) { + latency += 1; + } + return latency; +} + +int Mulh64Latency(bool is_operand_register = true) { + int latency = Latency::DMULT + Latency::MOVF_HIGH; + if (!is_operand_register) { + latency += 1; + } + return latency; +} + +int Div32Latency(bool is_operand_register = true) { + if (is_operand_register) { + return Latency::DIV32; + } else { + return Latency::DIV32 + 1; + } +} + +int Divu32Latency(bool is_operand_register = true) { + if (is_operand_register) { + return Latency::DIVU32; + } else { + return Latency::DIVU32 + 1; + } +} + +int Div64Latency(bool is_operand_register = true) { + int latency = Latency::DIV64 + Latency::MOVF_LOW; + if (!is_operand_register) { + latency += 1; + } + return latency; +} + +int Divu64Latency(bool is_operand_register = true) { + int latency = Latency::DIVU64 + Latency::MOVF_LOW; + if (!is_operand_register) { + latency += 1; + } + return latency; +} + +int Mod32Latency(bool is_operand_register = true) { + int latency = Latency::DIV32 + Latency::MOVF_HIGH; + if (!is_operand_register) { + latency += 1; + } + return latency; +} + +int Modu32Latency(bool is_operand_register = true) { + int latency = Latency::DIVU32 + Latency::MOVF_HIGH; + if (!is_operand_register) { + latency += 1; + } + return latency; +} + +int Mod64Latency(bool is_operand_register = true) { + int latency = Latency::DIV64 + Latency::MOVF_HIGH; + if (!is_operand_register) { + latency += 1; + } + return latency; +} + +int Modu64Latency(bool is_operand_register = true) { + int latency = Latency::DIV64 + Latency::MOVF_HIGH; + if (!is_operand_register) { + latency += 1; + } + return latency; +} + +int MovzLatency() { return 1; } + +int MovnLatency() { return 1; } + +int CallLatency() { + // Estimated. + return Add64Latency(false) + Latency::BRANCH + 5; +} + +int JumpLatency() { + // Estimated max. + return 1 + Add64Latency() + Latency::BRANCH + 2; +} + +int SmiUntagLatency() { return 1; } + +int PrepareForTailCallLatency() { + // Estimated max. + return 2 * (Add64Latency() + 1 + Add64Latency(false)) + 2 + Latency::BRANCH + + Latency::BRANCH + 2 * Sub64Latency(false) + 2 + Latency::BRANCH + 1; +} + +int AssemblePopArgumentsAdoptFrameLatency() { + return 1 + Latency::BRANCH + 1 + SmiUntagLatency() + + PrepareForTailCallLatency(); +} + +int AssertLatency() { return 1; } + +int PrepareCallCFunctionLatency() { + int frame_alignment = TurboAssembler::ActivationFrameAlignment(); + if (frame_alignment > kSystemPointerSize) { + return 1 + Sub64Latency(false) + AndLatency(false) + 1; + } else { + return Sub64Latency(false); + } +} + +int AdjustBaseAndOffsetLatency() { + return 3; // Estimated max. 
+} + +int AlignedMemoryLatency() { return AdjustBaseAndOffsetLatency() + 1; } + +int UlhuLatency() { + return AdjustBaseAndOffsetLatency() + 2 * AlignedMemoryLatency() + 2; +} + +int UlwLatency() { + // Estimated max. + return AdjustBaseAndOffsetLatency() + 3; +} + +int UlwuLatency() { return UlwLatency() + 1; } + +int UldLatency() { + // Estimated max. + return AdjustBaseAndOffsetLatency() + 3; +} + +int ULoadFloatLatency() { return UlwLatency() + Latency::MOVT_FREG; } + +int ULoadDoubleLatency() { return UldLatency() + Latency::MOVT_DREG; } + +int UshLatency() { + // Estimated max. + return AdjustBaseAndOffsetLatency() + 2 + 2 * AlignedMemoryLatency(); +} + +int UswLatency() { return AdjustBaseAndOffsetLatency() + 2; } + +int UsdLatency() { return AdjustBaseAndOffsetLatency() + 2; } + +int UStoreFloatLatency() { return Latency::MOVF_FREG + UswLatency(); } + +int UStoreDoubleLatency() { return Latency::MOVF_HIGH_DREG + UsdLatency(); } + +int LoadFloatLatency() { + return AdjustBaseAndOffsetLatency() + Latency::LOAD_FLOAT; +} + +int StoreFloatLatency() { + return AdjustBaseAndOffsetLatency() + Latency::STORE_FLOAT; +} + +int StoreDoubleLatency() { + return AdjustBaseAndOffsetLatency() + Latency::STORE_DOUBLE; +} + +int LoadDoubleLatency() { + return AdjustBaseAndOffsetLatency() + Latency::LOAD_DOUBLE; +} + +int MultiPushLatency() { + int latency = Sub64Latency(false); + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { + latency++; + } + return latency; +} + +int MultiPushFPULatency() { + int latency = Sub64Latency(false); + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { + latency += StoreDoubleLatency(); + } + return latency; +} + +int PushCallerSavedLatency(SaveFPRegsMode fp_mode) { + int latency = MultiPushLatency(); + if (fp_mode == SaveFPRegsMode::kSave) { + latency += MultiPushFPULatency(); + } + return latency; +} + +int MultiPopLatency() { + int latency = Add64Latency(false); + for (int16_t i = 0; i < kNumRegisters; i++) { + latency++; + } + return latency; +} + +int MultiPopFPULatency() { + int latency = Add64Latency(false); + for (int16_t i = 0; i < kNumRegisters; i++) { + latency += LoadDoubleLatency(); + } + return latency; +} + +int PopCallerSavedLatency(SaveFPRegsMode fp_mode) { + int latency = MultiPopLatency(); + if (fp_mode == SaveFPRegsMode::kSave) { + latency += MultiPopFPULatency(); + } + return latency; +} + +int CallCFunctionHelperLatency() { + // Estimated. + int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency(); + if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) { + latency++; + } else { + latency += Add64Latency(false); + } + return latency; +} + +int CallCFunctionLatency() { return 1 + CallCFunctionHelperLatency(); } + +int AssembleArchJumpLatency() { + // Estimated max. + return Latency::BRANCH; +} + +int GenerateSwitchTableLatency() { + int latency = 6; + latency += 2; + return latency; +} + +int AssembleArchTableSwitchLatency() { + return Latency::BRANCH + GenerateSwitchTableLatency(); +} + +int DropAndRetLatency() { + // Estimated max. + return Add64Latency(false) + JumpLatency(); +} + +int AssemblerReturnLatency() { + // Estimated max. 
+ return Add64Latency(false) + MultiPopLatency() + MultiPopFPULatency() + + Latency::BRANCH + Add64Latency() + 1 + DropAndRetLatency(); +} + +int TryInlineTruncateDoubleToILatency() { + return 2 + Latency::TRUNC_W_D + Latency::MOVF_FREG + 2 + AndLatency(false) + + Latency::BRANCH; +} + +int CallStubDelayedLatency() { return 1 + CallLatency(); } + +int TruncateDoubleToIDelayedLatency() { + // TODO(riscv): This no longer reflects how TruncateDoubleToI is called. + return TryInlineTruncateDoubleToILatency() + 1 + Sub64Latency(false) + + StoreDoubleLatency() + CallStubDelayedLatency() + Add64Latency(false) + + 1; +} + +int CheckPageFlagLatency() { + return AndLatency(false) + AlignedMemoryLatency() + AndLatency(false) + + Latency::BRANCH; +} + +int SltuLatency(bool is_operand_register = true) { + if (is_operand_register) { + return 1; + } else { + return 2; // Estimated max. + } +} + +int BranchShortHelperLatency() { + return SltuLatency() + 2; // Estimated max. +} + +int BranchShortLatency() { return BranchShortHelperLatency(); } + +int MoveLatency() { return 1; } + +int MovToFloatParametersLatency() { return 2 * MoveLatency(); } + +int MovFromFloatResultLatency() { return MoveLatency(); } + +int AddOverflow64Latency() { + // Estimated max. + return 6; +} + +int SubOverflow64Latency() { + // Estimated max. + return 6; +} + +int MulOverflow32Latency() { + // Estimated max. + return Mul32Latency() + Mulh32Latency() + 2; +} + +// TODO(RISCV): This is incorrect for RISC-V. +int Clz64Latency() { return 1; } + +int Ctz32Latency() { + return Add64Latency(false) + XorLatency() + AndLatency() + Clz64Latency() + + 1 + Sub64Latency(); +} + +int Ctz64Latency() { + return Add64Latency(false) + XorLatency() + AndLatency() + 1 + Sub64Latency(); +} + +int Popcnt32Latency() { + return 2 + AndLatency() + Sub64Latency() + 1 + AndLatency() + 1 + + AndLatency() + Add64Latency() + 1 + Add64Latency() + 1 + AndLatency() + + 1 + Mul32Latency() + 1; +} + +int Popcnt64Latency() { + return 2 + AndLatency() + Sub64Latency() + 1 + AndLatency() + 1 + + AndLatency() + Add64Latency() + 1 + Add64Latency() + 1 + AndLatency() + + 1 + Mul64Latency() + 1; +} + +int CompareFLatency() { return Latency::C_cond_S; } + +int CompareF32Latency() { return CompareFLatency(); } + +int CompareF64Latency() { return CompareFLatency(); } + +int CompareIsNanFLatency() { return CompareFLatency(); } + +int CompareIsNanF32Latency() { return CompareIsNanFLatency(); } + +int CompareIsNanF64Latency() { return CompareIsNanFLatency(); } + +int NegsLatency() { + // Estimated. + return CompareIsNanF32Latency() + 2 * Latency::BRANCH + Latency::NEG_S + + Latency::MOVF_FREG + 1 + XorLatency() + Latency::MOVT_FREG; +} + +int NegdLatency() { + // Estimated. + return CompareIsNanF64Latency() + 2 * Latency::BRANCH + Latency::NEG_D + + Latency::MOVF_HIGH_DREG + 1 + XorLatency() + Latency::MOVT_DREG; +} + +int Float64RoundLatency() { + // For ceil_l_d, floor_l_d, round_l_d, trunc_l_d latency is 4. + return Latency::MOVF_HIGH_DREG + 1 + Latency::BRANCH + Latency::MOV_D + 4 + + Latency::MOVF_HIGH_DREG + Latency::BRANCH + Latency::CVT_D_L + 2 + + Latency::MOVT_HIGH_FREG; +} + +int Float32RoundLatency() { + // For ceil_w_s, floor_w_s, round_w_s, trunc_w_s latency is 4. + return Latency::MOVF_FREG + 1 + Latency::BRANCH + Latency::MOV_S + 4 + + Latency::MOVF_FREG + Latency::BRANCH + Latency::CVT_S_W + 2 + + Latency::MOVT_FREG; +} + +int Float32MaxLatency() { + // Estimated max. 
+ int latency = CompareIsNanF32Latency() + Latency::BRANCH; + return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() + + Latency::MOVF_FREG + 1 + Latency::MOV_S; +} + +int Float64MaxLatency() { + // Estimated max. + int latency = CompareIsNanF64Latency() + Latency::BRANCH; + return latency + 5 * Latency::BRANCH + 2 * CompareF64Latency() + + Latency::MOVF_HIGH_DREG + Latency::MOV_D; +} + +int Float32MinLatency() { + // Estimated max. + int latency = CompareIsNanF32Latency() + Latency::BRANCH; + return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() + + Latency::MOVF_FREG + 1 + Latency::MOV_S; +} + +int Float64MinLatency() { + // Estimated max. + int latency = CompareIsNanF64Latency() + Latency::BRANCH; + return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() + + Latency::MOVF_HIGH_DREG + Latency::MOV_D; +} + +int TruncLSLatency(bool load_status) { + int latency = Latency::TRUNC_L_S + Latency::MOVF_HIGH_DREG; + if (load_status) { + latency += SltuLatency() + 7; + } + return latency; +} + +int TruncLDLatency(bool load_status) { + int latency = Latency::TRUNC_L_D + Latency::MOVF_HIGH_DREG; + if (load_status) { + latency += SltuLatency() + 7; + } + return latency; +} + +int TruncUlSLatency() { + // Estimated max. + return 2 * CompareF32Latency() + CompareIsNanF32Latency() + + 4 * Latency::BRANCH + Latency::SUB_S + 2 * Latency::TRUNC_L_S + + 3 * Latency::MOVF_HIGH_DREG + OrLatency() + Latency::MOVT_FREG + + Latency::MOV_S + SltuLatency() + 4; +} + +int TruncUlDLatency() { + // Estimated max. + return 2 * CompareF64Latency() + CompareIsNanF64Latency() + + 4 * Latency::BRANCH + Latency::SUB_D + 2 * Latency::TRUNC_L_D + + 3 * Latency::MOVF_HIGH_DREG + OrLatency() + Latency::MOVT_DREG + + Latency::MOV_D + SltuLatency() + 4; +} + +int PushLatency() { return Add64Latency() + AlignedMemoryLatency(); } + +int ByteSwapSignedLatency() { return 2; } + +int LlLatency(int offset) { + bool is_one_instruction = is_int12(offset); + if (is_one_instruction) { + return 1; + } else { + return 3; + } +} + +int ExtractBitsLatency(bool sign_extend, int size) { + int latency = 2; + if (sign_extend) { + switch (size) { + case 8: + case 16: + case 32: + latency += 1; + break; + default: + UNREACHABLE(); + } + } + return latency; +} + +int InsertBitsLatency() { return 2 + Sub64Latency(false) + 2; } + +int ScLatency(int offset) { return 3; } + +int Word32AtomicExchangeLatency(bool sign_extend, int size) { + return Add64Latency(false) + 1 + Sub64Latency() + 2 + LlLatency(0) + + ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() + + ScLatency(0) + BranchShortLatency() + 1; +} + +int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) { + return 2 + Sub64Latency() + 2 + LlLatency(0) + + ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() + + ScLatency(0) + BranchShortLatency() + 1; +} + +int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { + // TODO(RISCV): Verify these latencies for RISC-V (currently using MIPS + // numbers). 
+ switch (instr->arch_opcode()) { + case kArchCallCodeObject: + case kArchCallWasmFunction: + return CallLatency(); + case kArchTailCallCodeObject: + case kArchTailCallWasm: + case kArchTailCallAddress: + return JumpLatency(); + case kArchCallJSFunction: { + int latency = 0; + if (FLAG_debug_code) { + latency = 1 + AssertLatency(); + } + return latency + 1 + Add64Latency(false) + CallLatency(); + } + case kArchPrepareCallCFunction: + return PrepareCallCFunctionLatency(); + case kArchSaveCallerRegisters: { + auto fp_mode = + static_cast(MiscField::decode(instr->opcode())); + return PushCallerSavedLatency(fp_mode); + } + case kArchRestoreCallerRegisters: { + auto fp_mode = + static_cast(MiscField::decode(instr->opcode())); + return PopCallerSavedLatency(fp_mode); + } + case kArchPrepareTailCall: + return 2; + case kArchCallCFunction: + return CallCFunctionLatency(); + case kArchJmp: + return AssembleArchJumpLatency(); + case kArchTableSwitch: + return AssembleArchTableSwitchLatency(); + case kArchAbortCSADcheck: + return CallLatency() + 1; + case kArchDebugBreak: + return 1; + case kArchComment: + case kArchNop: + case kArchThrowTerminator: + case kArchDeoptimize: + return 0; + case kArchRet: + return AssemblerReturnLatency(); + case kArchFramePointer: + return 1; + case kArchParentFramePointer: + // Estimated max. + return AlignedMemoryLatency(); + case kArchTruncateDoubleToI: + return TruncateDoubleToIDelayedLatency(); + case kArchStoreWithWriteBarrier: + return Add64Latency() + 1 + CheckPageFlagLatency(); + case kArchStackSlot: + // Estimated max. + return Add64Latency(false) + AndLatency(false) + AssertLatency() + + Add64Latency(false) + AndLatency(false) + BranchShortLatency() + + 1 + Sub64Latency() + Add64Latency(); + case kIeee754Float64Acos: + case kIeee754Float64Acosh: + case kIeee754Float64Asin: + case kIeee754Float64Asinh: + case kIeee754Float64Atan: + case kIeee754Float64Atanh: + case kIeee754Float64Atan2: + case kIeee754Float64Cos: + case kIeee754Float64Cosh: + case kIeee754Float64Cbrt: + case kIeee754Float64Exp: + case kIeee754Float64Expm1: + case kIeee754Float64Log: + case kIeee754Float64Log1p: + case kIeee754Float64Log10: + case kIeee754Float64Log2: + case kIeee754Float64Pow: + case kIeee754Float64Sin: + case kIeee754Float64Sinh: + case kIeee754Float64Tan: + case kIeee754Float64Tanh: + return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + + CallCFunctionLatency() + MovFromFloatResultLatency(); + case kRiscvAdd32: + case kRiscvAdd64: + return Add64Latency(instr->InputAt(1)->IsRegister()); + case kRiscvAddOvf64: + return AddOverflow64Latency(); + case kRiscvSub32: + case kRiscvSub64: + return Sub64Latency(instr->InputAt(1)->IsRegister()); + case kRiscvSubOvf64: + return SubOverflow64Latency(); + case kRiscvMul32: + return Mul32Latency(); + case kRiscvMulOvf32: + return MulOverflow32Latency(); + case kRiscvMulHigh32: + return Mulh32Latency(); + case kRiscvMulHighU32: + return Mulhu32Latency(); + case kRiscvMulHigh64: + return Mulh64Latency(); + case kRiscvDiv32: { + int latency = Div32Latency(instr->InputAt(1)->IsRegister()); + return latency + MovzLatency(); + } + case kRiscvDivU32: { + int latency = Divu32Latency(instr->InputAt(1)->IsRegister()); + return latency + MovzLatency(); + } + case kRiscvMod32: + return Mod32Latency(); + case kRiscvModU32: + return Modu32Latency(); + case kRiscvMul64: + return Mul64Latency(); + case kRiscvDiv64: { + int latency = Div64Latency(); + return latency + MovzLatency(); + } + case kRiscvDivU64: { + int latency = 
Divu64Latency(); + return latency + MovzLatency(); + } + case kRiscvMod64: + return Mod64Latency(); + case kRiscvModU64: + return Modu64Latency(); + case kRiscvAnd: + return AndLatency(instr->InputAt(1)->IsRegister()); + case kRiscvAnd32: { + bool is_operand_register = instr->InputAt(1)->IsRegister(); + int latency = AndLatency(is_operand_register); + if (is_operand_register) { + return latency + 2; + } else { + return latency + 1; + } + } + case kRiscvOr: + return OrLatency(instr->InputAt(1)->IsRegister()); + case kRiscvOr32: { + bool is_operand_register = instr->InputAt(1)->IsRegister(); + int latency = OrLatency(is_operand_register); + if (is_operand_register) { + return latency + 2; + } else { + return latency + 1; + } + } + case kRiscvNor: + return NorLatency(instr->InputAt(1)->IsRegister()); + case kRiscvNor32: { + bool is_operand_register = instr->InputAt(1)->IsRegister(); + int latency = NorLatency(is_operand_register); + if (is_operand_register) { + return latency + 2; + } else { + return latency + 1; + } + } + case kRiscvXor: + return XorLatency(instr->InputAt(1)->IsRegister()); + case kRiscvXor32: { + bool is_operand_register = instr->InputAt(1)->IsRegister(); + int latency = XorLatency(is_operand_register); + if (is_operand_register) { + return latency + 2; + } else { + return latency + 1; + } + } + case kRiscvClz32: + case kRiscvClz64: + return Clz64Latency(); + case kRiscvCtz32: + return Ctz32Latency(); + case kRiscvCtz64: + return Ctz64Latency(); + case kRiscvPopcnt32: + return Popcnt32Latency(); + case kRiscvPopcnt64: + return Popcnt64Latency(); + case kRiscvShl32: + return 1; + case kRiscvShr32: + case kRiscvSar32: + case kRiscvZeroExtendWord: + return 2; + case kRiscvSignExtendWord: + case kRiscvShl64: + case kRiscvShr64: + case kRiscvSar64: + case kRiscvRor32: + case kRiscvRor64: + return 1; + case kRiscvTst: + return AndLatency(instr->InputAt(1)->IsRegister()); + case kRiscvMov: + return 1; + case kRiscvCmpS: + return MoveLatency() + CompareF32Latency(); + case kRiscvAddS: + return Latency::ADD_S; + case kRiscvSubS: + return Latency::SUB_S; + case kRiscvMulS: + return Latency::MUL_S; + case kRiscvDivS: + return Latency::DIV_S; + case kRiscvModS: + return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + + CallCFunctionLatency() + MovFromFloatResultLatency(); + case kRiscvAbsS: + return Latency::ABS_S; + case kRiscvNegS: + return NegdLatency(); + case kRiscvSqrtS: + return Latency::SQRT_S; + case kRiscvMaxS: + return Latency::MAX_S; + case kRiscvMinS: + return Latency::MIN_S; + case kRiscvCmpD: + return MoveLatency() + CompareF64Latency(); + case kRiscvAddD: + return Latency::ADD_D; + case kRiscvSubD: + return Latency::SUB_D; + case kRiscvMulD: + return Latency::MUL_D; + case kRiscvDivD: + return Latency::DIV_D; + case kRiscvModD: + return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + + CallCFunctionLatency() + MovFromFloatResultLatency(); + case kRiscvAbsD: + return Latency::ABS_D; + case kRiscvNegD: + return NegdLatency(); + case kRiscvSqrtD: + return Latency::SQRT_D; + case kRiscvMaxD: + return Latency::MAX_D; + case kRiscvMinD: + return Latency::MIN_D; + case kRiscvFloat64RoundDown: + case kRiscvFloat64RoundTruncate: + case kRiscvFloat64RoundUp: + case kRiscvFloat64RoundTiesEven: + return Float64RoundLatency(); + case kRiscvFloat32RoundDown: + case kRiscvFloat32RoundTruncate: + case kRiscvFloat32RoundUp: + case kRiscvFloat32RoundTiesEven: + return Float32RoundLatency(); + case kRiscvFloat32Max: + return Float32MaxLatency(); + case 
kRiscvFloat64Max: + return Float64MaxLatency(); + case kRiscvFloat32Min: + return Float32MinLatency(); + case kRiscvFloat64Min: + return Float64MinLatency(); + case kRiscvFloat64SilenceNaN: + return Latency::SUB_D; + case kRiscvCvtSD: + return Latency::CVT_S_D; + case kRiscvCvtDS: + return Latency::CVT_D_S; + case kRiscvCvtDW: + return Latency::MOVT_FREG + Latency::CVT_D_W; + case kRiscvCvtSW: + return Latency::MOVT_FREG + Latency::CVT_S_W; + case kRiscvCvtSUw: + return 1 + Latency::MOVT_DREG + Latency::CVT_S_L; + case kRiscvCvtSL: + return Latency::MOVT_DREG + Latency::CVT_S_L; + case kRiscvCvtDL: + return Latency::MOVT_DREG + Latency::CVT_D_L; + case kRiscvCvtDUw: + return 1 + Latency::MOVT_DREG + Latency::CVT_D_L; + case kRiscvCvtDUl: + return 2 * Latency::BRANCH + 3 + 2 * Latency::MOVT_DREG + + 2 * Latency::CVT_D_L + Latency::ADD_D; + case kRiscvCvtSUl: + return 2 * Latency::BRANCH + 3 + 2 * Latency::MOVT_DREG + + 2 * Latency::CVT_S_L + Latency::ADD_S; + case kRiscvFloorWD: + return Latency::FLOOR_W_D + Latency::MOVF_FREG; + case kRiscvCeilWD: + return Latency::CEIL_W_D + Latency::MOVF_FREG; + case kRiscvRoundWD: + return Latency::ROUND_W_D + Latency::MOVF_FREG; + case kRiscvTruncWD: + return Latency::TRUNC_W_D + Latency::MOVF_FREG; + case kRiscvFloorWS: + return Latency::FLOOR_W_S + Latency::MOVF_FREG; + case kRiscvCeilWS: + return Latency::CEIL_W_S + Latency::MOVF_FREG; + case kRiscvRoundWS: + return Latency::ROUND_W_S + Latency::MOVF_FREG; + case kRiscvTruncWS: + return Latency::TRUNC_W_S + Latency::MOVF_FREG + 2 + MovnLatency(); + case kRiscvTruncLS: + return TruncLSLatency(instr->OutputCount() > 1); + case kRiscvTruncLD: + return TruncLDLatency(instr->OutputCount() > 1); + case kRiscvTruncUwD: + // Estimated max. + return CompareF64Latency() + 2 * Latency::BRANCH + + 2 * Latency::TRUNC_W_D + Latency::SUB_D + OrLatency() + + Latency::MOVT_FREG + Latency::MOVF_FREG + Latency::MOVT_HIGH_FREG + + 1; + case kRiscvTruncUwS: + // Estimated max. 
+ return CompareF32Latency() + 2 * Latency::BRANCH + + 2 * Latency::TRUNC_W_S + Latency::SUB_S + OrLatency() + + Latency::MOVT_FREG + 2 * Latency::MOVF_FREG + 2 + MovzLatency(); + case kRiscvTruncUlS: + return TruncUlSLatency(); + case kRiscvTruncUlD: + return TruncUlDLatency(); + case kRiscvBitcastDL: + return Latency::MOVF_HIGH_DREG; + case kRiscvBitcastLD: + return Latency::MOVT_DREG; + case kRiscvFloat64ExtractLowWord32: + return Latency::MOVF_FREG; + case kRiscvFloat64InsertLowWord32: + return Latency::MOVF_HIGH_FREG + Latency::MOVT_FREG + + Latency::MOVT_HIGH_FREG; + case kRiscvFloat64ExtractHighWord32: + return Latency::MOVF_HIGH_FREG; + case kRiscvFloat64InsertHighWord32: + return Latency::MOVT_HIGH_FREG; + case kRiscvSignExtendByte: + case kRiscvSignExtendShort: + return 1; + case kRiscvLbu: + case kRiscvLb: + case kRiscvLhu: + case kRiscvLh: + case kRiscvLwu: + case kRiscvLw: + case kRiscvLd: + case kRiscvSb: + case kRiscvSh: + case kRiscvSw: + case kRiscvSd: + return AlignedMemoryLatency(); + case kRiscvLoadFloat: + return ULoadFloatLatency(); + case kRiscvLoadDouble: + return LoadDoubleLatency(); + case kRiscvStoreFloat: + return StoreFloatLatency(); + case kRiscvStoreDouble: + return StoreDoubleLatency(); + case kRiscvUlhu: + case kRiscvUlh: + return UlhuLatency(); + case kRiscvUlwu: + return UlwuLatency(); + case kRiscvUlw: + return UlwLatency(); + case kRiscvUld: + return UldLatency(); + case kRiscvULoadFloat: + return ULoadFloatLatency(); + case kRiscvULoadDouble: + return ULoadDoubleLatency(); + case kRiscvUsh: + return UshLatency(); + case kRiscvUsw: + return UswLatency(); + case kRiscvUsd: + return UsdLatency(); + case kRiscvUStoreFloat: + return UStoreFloatLatency(); + case kRiscvUStoreDouble: + return UStoreDoubleLatency(); + case kRiscvPush: { + int latency = 0; + if (instr->InputAt(0)->IsFPRegister()) { + latency = StoreDoubleLatency() + Sub64Latency(false); + } else { + latency = PushLatency(); + } + return latency; + } + case kRiscvPeek: { + int latency = 0; + if (instr->OutputAt(0)->IsFPRegister()) { + auto op = LocationOperand::cast(instr->OutputAt(0)); + switch (op->representation()) { + case MachineRepresentation::kFloat64: + latency = LoadDoubleLatency(); + break; + case MachineRepresentation::kFloat32: + latency = Latency::LOAD_FLOAT; + break; + default: + UNREACHABLE(); + } + } else { + latency = AlignedMemoryLatency(); + } + return latency; + } + case kRiscvStackClaim: + return Sub64Latency(false); + case kRiscvStoreToStackSlot: { + int latency = 0; + if (instr->InputAt(0)->IsFPRegister()) { + if (instr->InputAt(0)->IsSimd128Register()) { + latency = 1; // Estimated value. 
+ } else { + latency = StoreDoubleLatency(); + } + } else { + latency = AlignedMemoryLatency(); + } + return latency; + } + case kRiscvByteSwap64: + return ByteSwapSignedLatency(); + case kRiscvByteSwap32: + return ByteSwapSignedLatency(); + case kAtomicLoadInt8: + case kAtomicLoadUint8: + case kAtomicLoadInt16: + case kAtomicLoadUint16: + case kAtomicLoadWord32: + return 2; + case kAtomicStoreWord8: + case kAtomicStoreWord16: + case kAtomicStoreWord32: + return 3; + case kAtomicExchangeInt8: + return Word32AtomicExchangeLatency(true, 8); + case kAtomicExchangeUint8: + return Word32AtomicExchangeLatency(false, 8); + case kAtomicExchangeInt16: + return Word32AtomicExchangeLatency(true, 16); + case kAtomicExchangeUint16: + return Word32AtomicExchangeLatency(false, 16); + case kAtomicExchangeWord32: + return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1; + case kAtomicCompareExchangeInt8: + return Word32AtomicCompareExchangeLatency(true, 8); + case kAtomicCompareExchangeUint8: + return Word32AtomicCompareExchangeLatency(false, 8); + case kAtomicCompareExchangeInt16: + return Word32AtomicCompareExchangeLatency(true, 16); + case kAtomicCompareExchangeUint16: + return Word32AtomicCompareExchangeLatency(false, 16); + case kAtomicCompareExchangeWord32: + return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) + + BranchShortLatency() + 1; + case kRiscvAssertEqual: + return AssertLatency(); + default: + return 1; + } +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc new file mode 100644 index 00000000000000..fce1b92f9683c5 --- /dev/null +++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc @@ -0,0 +1,3403 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/base/bits.h" +#include "src/compiler/backend/instruction-selector-impl.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties.h" + +namespace v8 { +namespace internal { +namespace compiler { + +#define TRACE_UNIMPL() \ + PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__) + +#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__) + +// Adds RISC-V-specific methods for generating InstructionOperands. +class RiscvOperandGenerator final : public OperandGenerator { + public: + explicit RiscvOperandGenerator(InstructionSelector* selector) + : OperandGenerator(selector) {} + + InstructionOperand UseOperand(Node* node, InstructionCode opcode) { + if (CanBeImmediate(node, opcode)) { + return UseImmediate(node); + } + return UseRegister(node); + } + + // Use the zero register if the node has the immediate value zero, otherwise + // assign a register. 
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) { + if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) || + (IsFloatConstant(node) && + (bit_cast(GetFloatConstantValue(node)) == 0))) { + return UseImmediate(node); + } + return UseRegister(node); + } + + bool IsIntegerConstant(Node* node) { + if (node->opcode() == IrOpcode::kNumberConstant) { + const double value = OpParameter(node->op()); + return bit_cast(value) == 0; + } + return (node->opcode() == IrOpcode::kInt32Constant) || + (node->opcode() == IrOpcode::kInt64Constant); + } + + int64_t GetIntegerConstantValue(Node* node) { + if (node->opcode() == IrOpcode::kInt32Constant) { + return OpParameter(node->op()); + } else if (node->opcode() == IrOpcode::kInt64Constant) { + return OpParameter(node->op()); + } + DCHECK_EQ(node->opcode(), IrOpcode::kNumberConstant); + const double value = OpParameter(node->op()); + DCHECK_EQ(bit_cast(value), 0); + return bit_cast(value); + } + + bool IsFloatConstant(Node* node) { + return (node->opcode() == IrOpcode::kFloat32Constant) || + (node->opcode() == IrOpcode::kFloat64Constant); + } + + double GetFloatConstantValue(Node* node) { + if (node->opcode() == IrOpcode::kFloat32Constant) { + return OpParameter(node->op()); + } + DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode()); + return OpParameter(node->op()); + } + + bool CanBeImmediate(Node* node, InstructionCode mode) { + return IsIntegerConstant(node) && + CanBeImmediate(GetIntegerConstantValue(node), mode); + } + + bool CanBeImmediate(int64_t value, InstructionCode opcode) { + switch (ArchOpcodeField::decode(opcode)) { + case kRiscvShl32: + case kRiscvSar32: + case kRiscvShr32: + return is_uint5(value); + case kRiscvShl64: + case kRiscvSar64: + case kRiscvShr64: + return is_uint6(value); + case kRiscvAdd32: + case kRiscvAnd32: + case kRiscvAnd: + case kRiscvAdd64: + case kRiscvOr32: + case kRiscvOr: + case kRiscvTst: + case kRiscvXor: + return is_int12(value); + case kRiscvLb: + case kRiscvLbu: + case kRiscvSb: + case kRiscvLh: + case kRiscvLhu: + case kRiscvSh: + case kRiscvLw: + case kRiscvSw: + case kRiscvLd: + case kRiscvSd: + case kRiscvLoadFloat: + case kRiscvStoreFloat: + case kRiscvLoadDouble: + case kRiscvStoreDouble: + return is_int32(value); + default: + return is_int12(value); + } + } + + private: + bool ImmediateFitsAddrMode1Instruction(int32_t imm) const { + TRACE_UNIMPL(); + return false; + } +}; + +static void VisitRR(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + RiscvOperandGenerator g(selector); + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + +static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + RiscvOperandGenerator g(selector); + int32_t imm = OpParameter(node->op()); + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseImmediate(imm)); +} + +static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + RiscvOperandGenerator g(selector); + if (g.IsIntegerConstant(node->InputAt(1))) { + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), + g.UseImmediate(node->InputAt(1))); + } else { + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), + g.UseRegister(node->InputAt(1))); + } +} + +static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + RiscvOperandGenerator g(selector); + int32_t imm = OpParameter(node->op()); + 
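CanBeImmediate() above encodes the RISC-V operand limits the selector cares about: 5-bit unsigned shift amounts for 32-bit shifts, 6-bit for 64-bit shifts, a signed 12-bit field for I-type ALU immediates (and the default case), and up to signed 32 bits for memory offsets, which the macro-assembler splits if needed. A self-contained sketch of those range predicates, not part of the diff; IsIntN/IsUintN here are stand-ins for V8's is_int12/is_uint5/is_uint6 helpers:

#include <cassert>
#include <cstdint>

// Signed n-bit range: [-2^(n-1), 2^(n-1) - 1].
bool IsIntN(int64_t v, int bits) {
  int64_t hi = (int64_t{1} << (bits - 1)) - 1;
  return v >= -hi - 1 && v <= hi;
}

// Unsigned n-bit range: [0, 2^n - 1].
bool IsUintN(int64_t v, int bits) { return v >= 0 && v < (int64_t{1} << bits); }

int main() {
  assert(IsUintN(31, 5));     // largest legal 32-bit shift amount
  assert(!IsUintN(32, 5));    // does not fit a 5-bit field
  assert(IsUintN(63, 6));     // largest legal 64-bit shift amount
  assert(IsIntN(-2048, 12));  // smallest I-type immediate
  assert(!IsIntN(2048, 12));  // one past the largest I-type immediate
}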
selector->Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseImmediate(imm), + g.UseRegister(node->InputAt(1))); +} + +static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + RiscvOperandGenerator g(selector); + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), + g.UseRegister(node->InputAt(1))); +} + +static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + RiscvOperandGenerator g(selector); + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1))); +} + +void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { + RiscvOperandGenerator g(selector); + selector->Emit( + opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), + g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); +} + +static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, + Node* node) { + RiscvOperandGenerator g(selector); + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), + g.UseOperand(node->InputAt(1), opcode)); +} + +struct ExtendingLoadMatcher { + ExtendingLoadMatcher(Node* node, InstructionSelector* selector) + : matches_(false), selector_(selector), base_(nullptr), immediate_(0) { + Initialize(node); + } + + bool Matches() const { return matches_; } + + Node* base() const { + DCHECK(Matches()); + return base_; + } + int64_t immediate() const { + DCHECK(Matches()); + return immediate_; + } + ArchOpcode opcode() const { + DCHECK(Matches()); + return opcode_; + } + + private: + bool matches_; + InstructionSelector* selector_; + Node* base_; + int64_t immediate_; + ArchOpcode opcode_; + + void Initialize(Node* node) { + Int64BinopMatcher m(node); + // When loading a 64-bit value and shifting by 32, we should + // just load and sign-extend the interesting 4 bytes instead. + // This happens, for example, when we're loading and untagging SMIs. 
+ DCHECK(m.IsWord64Sar()); + if (m.left().IsLoad() && m.right().Is(32) && + selector_->CanCover(m.node(), m.left().node())) { + DCHECK_EQ(selector_->GetEffectLevel(node), + selector_->GetEffectLevel(m.left().node())); + MachineRepresentation rep = + LoadRepresentationOf(m.left().node()->op()).representation(); + DCHECK_EQ(3, ElementSizeLog2Of(rep)); + if (rep != MachineRepresentation::kTaggedSigned && + rep != MachineRepresentation::kTaggedPointer && + rep != MachineRepresentation::kTagged && + rep != MachineRepresentation::kWord64) { + return; + } + + RiscvOperandGenerator g(selector_); + Node* load = m.left().node(); + Node* offset = load->InputAt(1); + base_ = load->InputAt(0); + opcode_ = kRiscvLw; + if (g.CanBeImmediate(offset, opcode_)) { +#if defined(V8_TARGET_LITTLE_ENDIAN) + immediate_ = g.GetIntegerConstantValue(offset) + 4; +#elif defined(V8_TARGET_BIG_ENDIAN) + immediate_ = g.GetIntegerConstantValue(offset); +#endif + matches_ = g.CanBeImmediate(immediate_, kRiscvLw); + } + } + } +}; + +bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node, + Node* output_node) { + ExtendingLoadMatcher m(node, selector); + RiscvOperandGenerator g(selector); + if (m.Matches()) { + InstructionOperand inputs[2]; + inputs[0] = g.UseRegister(m.base()); + InstructionCode opcode = + m.opcode() | AddressingModeField::encode(kMode_MRI); + DCHECK(is_int32(m.immediate())); + inputs[1] = g.TempImmediate(static_cast(m.immediate())); + InstructionOperand outputs[] = {g.DefineAsRegister(output_node)}; + selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs), + inputs); + return true; + } + return false; +} + +bool TryMatchImmediate(InstructionSelector* selector, + InstructionCode* opcode_return, Node* node, + size_t* input_count_return, InstructionOperand* inputs) { + RiscvOperandGenerator g(selector); + if (g.CanBeImmediate(node, *opcode_return)) { + *opcode_return |= AddressingModeField::encode(kMode_MRI); + inputs[0] = g.UseImmediate(node); + *input_count_return = 1; + return true; + } + return false; +} + +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode, bool has_reverse_opcode, + InstructionCode reverse_opcode, + FlagsContinuation* cont) { + RiscvOperandGenerator g(selector); + Int32BinopMatcher m(node); + InstructionOperand inputs[2]; + size_t input_count = 0; + InstructionOperand outputs[1]; + size_t output_count = 0; + + if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count, + &inputs[1])) { + inputs[0] = g.UseRegisterOrImmediateZero(m.left().node()); + input_count++; + } else if (has_reverse_opcode && + TryMatchImmediate(selector, &reverse_opcode, m.left().node(), + &input_count, &inputs[1])) { + inputs[0] = g.UseRegisterOrImmediateZero(m.right().node()); + opcode = reverse_opcode; + input_count++; + } else { + inputs[input_count++] = g.UseRegister(m.left().node()); + inputs[input_count++] = g.UseOperand(m.right().node(), opcode); + } + + if (cont->IsDeoptimize()) { + // If we can deoptimize as a result of the binop, we need to make sure that + // the deopt inputs are not overwritten by the binop result. One way + // to achieve that is to declare the output register as same-as-first. 
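ExtendingLoadMatcher above rewrites Word64Sar(Load64(base, off), 32) into a single 32-bit sign-extending load of the upper half, which on a little-endian target sits at byte offset off + 4 (the V8_TARGET_LITTLE_ENDIAN branch). A standalone check of that equivalence, not part of the diff, assuming a little-endian host:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint64_t word = 0xFFFFFFFE00000005ull;  // upper 32 bits are -2 as an int32_t

  // What the original pattern computes: arithmetic shift right by 32.
  int64_t via_shift = static_cast<int64_t>(word) >> 32;

  // What the rewritten code loads: 4 bytes at offset 4, sign-extended,
  // because little-endian order puts the upper word at the higher address.
  int32_t upper;
  std::memcpy(&upper, reinterpret_cast<const char*>(&word) + 4, sizeof(upper));
  int64_t via_load = upper;  // Lw sign-extends into the full register

  assert(via_shift == via_load && via_load == -2);
}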
+ outputs[output_count++] = g.DefineSameAsFirst(node); + } else { + outputs[output_count++] = g.DefineAsRegister(node); + } + + DCHECK_NE(0u, input_count); + DCHECK_EQ(1u, output_count); + DCHECK_GE(arraysize(inputs), input_count); + DCHECK_GE(arraysize(outputs), output_count); + + selector->EmitWithContinuation(opcode, output_count, outputs, input_count, + inputs, cont); +} + +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode, bool has_reverse_opcode, + InstructionCode reverse_opcode) { + FlagsContinuation cont; + VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont); +} + +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode, FlagsContinuation* cont) { + VisitBinop(selector, node, opcode, false, kArchNop, cont); +} + +static void VisitBinop(InstructionSelector* selector, Node* node, + InstructionCode opcode) { + VisitBinop(selector, node, opcode, false, kArchNop); +} + +void InstructionSelector::VisitStackSlot(Node* node) { + StackSlotRepresentation rep = StackSlotRepresentationOf(node->op()); + int alignment = rep.alignment(); + int slot = frame_->AllocateSpillSlot(rep.size(), alignment); + OperandGenerator g(this); + + Emit(kArchStackSlot, g.DefineAsRegister(node), + sequence()->AddImmediate(Constant(slot)), + sequence()->AddImmediate(Constant(alignment)), 0, nullptr); +} + +void InstructionSelector::VisitAbortCSADcheck(Node* node) { + RiscvOperandGenerator g(this); + Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); +} + +void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, + Node* output = nullptr) { + RiscvOperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + ExternalReferenceMatcher m(base); + if (m.HasResolvedValue() && g.IsIntegerConstant(index) && + selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) { + ptrdiff_t const delta = + g.GetIntegerConstantValue(index) + + TurboAssemblerBase::RootRegisterOffsetForExternalReference( + selector->isolate(), m.ResolvedValue()); + // Check that the delta is a 32-bit integer due to the limitations of + // immediate operands. + if (is_int32(delta)) { + opcode |= AddressingModeField::encode(kMode_Root); + selector->Emit(opcode, + g.DefineAsRegister(output == nullptr ? node : output), + g.UseImmediate(static_cast(delta))); + return; + } + } + + if (g.CanBeImmediate(index, opcode)) { + selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(output == nullptr ? node : output), + g.UseRegister(base), g.UseImmediate(index)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), + addr_reg, g.UseRegister(index), g.UseRegister(base)); + // Emit desired load opcode, using temp addr_reg. + selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(output == nullptr ? 
node : output), + addr_reg, g.TempImmediate(0)); + } +} + +void EmitS128Load(InstructionSelector* selector, Node* node, + InstructionCode opcode, VSew sew, Vlmul lmul) { + RiscvOperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + if (g.CanBeImmediate(index, opcode)) { + selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), g.UseRegister(base), + g.UseImmediate(index), g.UseImmediate(sew), + g.UseImmediate(lmul)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), + addr_reg, g.UseRegister(index), g.UseRegister(base)); + // Emit desired load opcode, using temp addr_reg. + selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), addr_reg, g.TempImmediate(0), + g.UseImmediate(sew), g.UseImmediate(lmul)); + } +} + +void InstructionSelector::VisitStoreLane(Node* node) { + StoreLaneParameters params = StoreLaneParametersOf(node->op()); + LoadStoreLaneParams f(params.rep, params.laneidx); + InstructionCode opcode = kRiscvS128StoreLane; + opcode |= MiscField::encode(f.sz); + + RiscvOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + InstructionOperand addr_reg = g.TempRegister(); + Emit(kRiscvAdd64, addr_reg, g.UseRegister(base), g.UseRegister(index)); + InstructionOperand inputs[4] = { + g.UseRegister(node->InputAt(2)), + g.UseImmediate(f.laneidx), + addr_reg, + g.TempImmediate(0), + }; + opcode |= AddressingModeField::encode(kMode_MRI); + Emit(opcode, 0, nullptr, 4, inputs); +} +void InstructionSelector::VisitLoadLane(Node* node) { + LoadLaneParameters params = LoadLaneParametersOf(node->op()); + LoadStoreLaneParams f(params.rep.representation(), params.laneidx); + InstructionCode opcode = kRiscvS128LoadLane; + opcode |= MiscField::encode(f.sz); + + RiscvOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + InstructionOperand addr_reg = g.TempRegister(); + Emit(kRiscvAdd64, addr_reg, g.UseRegister(base), g.UseRegister(index)); + opcode |= AddressingModeField::encode(kMode_MRI); + Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(2)), + g.UseImmediate(params.laneidx), addr_reg, g.TempImmediate(0)); +} + +void InstructionSelector::VisitLoadTransform(Node* node) { + LoadTransformParameters params = LoadTransformParametersOf(node->op()); + + switch (params.transformation) { + case LoadTransformation::kS128Load8Splat: + EmitS128Load(this, node, kRiscvS128LoadSplat, E8, m1); + break; + case LoadTransformation::kS128Load16Splat: + EmitS128Load(this, node, kRiscvS128LoadSplat, E16, m1); + break; + case LoadTransformation::kS128Load32Splat: + EmitS128Load(this, node, kRiscvS128LoadSplat, E32, m1); + break; + case LoadTransformation::kS128Load64Splat: + EmitS128Load(this, node, kRiscvS128LoadSplat, E64, m1); + break; + case LoadTransformation::kS128Load8x8S: + EmitS128Load(this, node, kRiscvS128Load64ExtendS, E16, m1); + break; + case LoadTransformation::kS128Load8x8U: + EmitS128Load(this, node, kRiscvS128Load64ExtendU, E16, m1); + break; + case LoadTransformation::kS128Load16x4S: + EmitS128Load(this, node, kRiscvS128Load64ExtendS, E32, m1); + break; + case LoadTransformation::kS128Load16x4U: + EmitS128Load(this, node, kRiscvS128Load64ExtendU, E32, m1); + break; + case LoadTransformation::kS128Load32x2S: + EmitS128Load(this, node, kRiscvS128Load64ExtendS, E64, m1); + break; + case 
LoadTransformation::kS128Load32x2U: + EmitS128Load(this, node, kRiscvS128Load64ExtendU, E64, m1); + break; + case LoadTransformation::kS128Load32Zero: + EmitS128Load(this, node, kRiscvS128Load32Zero, E32, m1); + break; + case LoadTransformation::kS128Load64Zero: + EmitS128Load(this, node, kRiscvS128Load64Zero, E64, m1); + break; + default: + UNIMPLEMENTED(); + } +} + +void InstructionSelector::VisitLoad(Node* node) { + LoadRepresentation load_rep = LoadRepresentationOf(node->op()); + + InstructionCode opcode = kArchNop; + switch (load_rep.representation()) { + case MachineRepresentation::kFloat32: + opcode = kRiscvLoadFloat; + break; + case MachineRepresentation::kFloat64: + opcode = kRiscvLoadDouble; + break; + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kWord8: + opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb; + break; + case MachineRepresentation::kWord16: + opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh; + break; + case MachineRepresentation::kWord32: + opcode = kRiscvLw; + break; +#ifdef V8_COMPRESS_POINTERS + case MachineRepresentation::kTaggedSigned: + opcode = kRiscvLoadDecompressTaggedSigned; + break; + case MachineRepresentation::kTaggedPointer: + opcode = kRiscvLoadDecompressTaggedPointer; + break; + case MachineRepresentation::kTagged: + opcode = kRiscvLoadDecompressAnyTagged; + break; +#else + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: // Fall through. +#endif + case MachineRepresentation::kWord64: + opcode = kRiscvLd; + break; + case MachineRepresentation::kSimd128: + opcode = kRiscvRvvLd; + break; + case MachineRepresentation::kCompressedPointer: + case MachineRepresentation::kCompressed: +#ifdef V8_COMPRESS_POINTERS + opcode = kRiscvLw; + break; +#else + // Fall through. +#endif + case MachineRepresentation::kSandboxedPointer: + case MachineRepresentation::kMapWord: // Fall through. + case MachineRepresentation::kNone: + UNREACHABLE(); + } + + EmitLoad(this, node, opcode); +} + +void InstructionSelector::VisitProtectedLoad(Node* node) { + // TODO(eholk) + UNIMPLEMENTED(); +} + +void InstructionSelector::VisitStore(Node* node) { + RiscvOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + StoreRepresentation store_rep = StoreRepresentationOf(node->op()); + WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); + MachineRepresentation rep = store_rep.representation(); + + // TODO(riscv): I guess this could be done in a better way. 
+ if (write_barrier_kind != kNoWriteBarrier && + V8_LIKELY(!FLAG_disable_write_barriers)) { + DCHECK(CanBeTaggedPointer(rep)); + InstructionOperand inputs[3]; + size_t input_count = 0; + inputs[input_count++] = g.UseUniqueRegister(base); + inputs[input_count++] = g.UseUniqueRegister(index); + inputs[input_count++] = g.UseUniqueRegister(value); + RecordWriteMode record_write_mode = + WriteBarrierKindToRecordWriteMode(write_barrier_kind); + InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; + size_t const temp_count = arraysize(temps); + InstructionCode code = kArchStoreWithWriteBarrier; + code |= MiscField::encode(static_cast(record_write_mode)); + Emit(code, 0, nullptr, input_count, inputs, temp_count, temps); + } else { + ArchOpcode opcode; + switch (rep) { + case MachineRepresentation::kFloat32: + opcode = kRiscvStoreFloat; + break; + case MachineRepresentation::kFloat64: + opcode = kRiscvStoreDouble; + break; + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kWord8: + opcode = kRiscvSb; + break; + case MachineRepresentation::kWord16: + opcode = kRiscvSh; + break; + case MachineRepresentation::kWord32: + opcode = kRiscvSw; + break; + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: +#ifdef V8_COMPRESS_POINTERS + opcode = kRiscvStoreCompressTagged; + break; +#endif + case MachineRepresentation::kWord64: + opcode = kRiscvSd; + break; + case MachineRepresentation::kSimd128: + opcode = kRiscvRvvSt; + break; + case MachineRepresentation::kCompressedPointer: // Fall through. + case MachineRepresentation::kCompressed: +#ifdef V8_COMPRESS_POINTERS + opcode = kRiscvStoreCompressTagged; + break; +#else + UNREACHABLE(); +#endif + case MachineRepresentation::kSandboxedPointer: + case MachineRepresentation::kMapWord: // Fall through. + case MachineRepresentation::kNone: + UNREACHABLE(); + } + + if (g.CanBeImmediate(index, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + g.UseRegister(base), g.UseImmediate(index), + g.UseRegisterOrImmediateZero(value)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg, + g.UseRegister(index), g.UseRegister(base)); + // Emit desired store opcode, using temp addr_reg. + Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value)); + } + } +} + +void InstructionSelector::VisitProtectedStore(Node* node) { + // TODO(eholk) + UNIMPLEMENTED(); +} + +void InstructionSelector::VisitWord32And(Node* node) { + VisitBinop(this, node, kRiscvAnd32, true, kRiscvAnd32); +} + +void InstructionSelector::VisitWord64And(Node* node) { + RiscvOperandGenerator g(this); + Int64BinopMatcher m(node); + if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) && + m.right().HasResolvedValue()) { + uint64_t mask = m.right().ResolvedValue(); + uint32_t mask_width = base::bits::CountPopulation(mask); + uint32_t mask_msb = base::bits::CountLeadingZeros64(mask); + if ((mask_width != 0) && (mask_msb + mask_width == 64)) { + // The mask must be contiguous, and occupy the least-significant bits. + DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); + + // Select Dext for And(Shr(x, imm), mask) where the mask is in the least + // significant bits. 
+ Int64BinopMatcher mleft(m.left().node()); + if (mleft.right().HasResolvedValue()) { + // Any shift value can match; int64 shifts use `value % 64`. + uint32_t lsb = + static_cast(mleft.right().ResolvedValue() & 0x3F); + + // Dext cannot extract bits past the register size, however since + // shifting the original value would have introduced some zeros we can + // still use Dext with a smaller mask and the remaining bits will be + // zeros. + if (lsb + mask_width > 64) mask_width = 64 - lsb; + + if (lsb == 0 && mask_width == 64) { + Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node())); + return; + } + } + // Other cases fall through to the normal And operation. + } + } + VisitBinop(this, node, kRiscvAnd, true, kRiscvAnd); +} + +void InstructionSelector::VisitWord32Or(Node* node) { + VisitBinop(this, node, kRiscvOr32, true, kRiscvOr32); +} + +void InstructionSelector::VisitWord64Or(Node* node) { + VisitBinop(this, node, kRiscvOr, true, kRiscvOr); +} + +void InstructionSelector::VisitWord32Xor(Node* node) { + Int32BinopMatcher m(node); + if (m.left().IsWord32Or() && CanCover(node, m.left().node()) && + m.right().Is(-1)) { + Int32BinopMatcher mleft(m.left().node()); + if (!mleft.right().HasResolvedValue()) { + RiscvOperandGenerator g(this); + Emit(kRiscvNor32, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } + if (m.right().Is(-1)) { + // Use Nor for bit negation and eliminate constant loading for xori. + RiscvOperandGenerator g(this); + Emit(kRiscvNor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.TempImmediate(0)); + return; + } + VisitBinop(this, node, kRiscvXor32, true, kRiscvXor32); +} + +void InstructionSelector::VisitWord64Xor(Node* node) { + Int64BinopMatcher m(node); + if (m.left().IsWord64Or() && CanCover(node, m.left().node()) && + m.right().Is(-1)) { + Int64BinopMatcher mleft(m.left().node()); + if (!mleft.right().HasResolvedValue()) { + RiscvOperandGenerator g(this); + Emit(kRiscvNor, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node()), + g.UseRegister(mleft.right().node())); + return; + } + } + if (m.right().Is(-1)) { + // Use Nor for bit negation and eliminate constant loading for xori. + RiscvOperandGenerator g(this); + Emit(kRiscvNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.TempImmediate(0)); + return; + } + VisitBinop(this, node, kRiscvXor, true, kRiscvXor); +} + +void InstructionSelector::VisitWord32Shl(Node* node) { + Int32BinopMatcher m(node); + if (m.left().IsWord32And() && CanCover(node, m.left().node()) && + m.right().IsInRange(1, 31)) { + RiscvOperandGenerator g(this); + Int32BinopMatcher mleft(m.left().node()); + // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is + // contiguous, and the shift immediate non-zero. + if (mleft.right().HasResolvedValue()) { + uint32_t mask = mleft.right().ResolvedValue(); + uint32_t mask_width = base::bits::CountPopulation(mask); + uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); + if ((mask_width != 0) && (mask_msb + mask_width == 32)) { + uint32_t shift = m.right().ResolvedValue(); + DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); + DCHECK_NE(0u, shift); + if ((shift + mask_width) >= 32) { + // If the mask is contiguous and reaches or extends beyond the top + // bit, only the shift is needed. 
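VisitWord64And above, and the shift matching that follows, use one test for a usable mask: popcount(mask) + countl_zero(mask) must equal the word width, i.e. the set bits form a single run starting at bit 0. A minimal sketch of that condition, not part of the diff, using C++20 <bit> in place of base::bits:

#include <bit>
#include <cassert>
#include <cstdint>

// True when mask is a non-empty run of ones occupying the least-significant bits.
bool IsLowContiguousMask(uint64_t mask) {
  int width = std::popcount(mask);       // mask_width in the selector
  int leading = std::countl_zero(mask);  // mask_msb in the selector
  return width != 0 && leading + width == 64;
}

int main() {
  assert(IsLowContiguousMask(0xFF));
  assert(IsLowContiguousMask(0x7FFFFFFFFFFFFFFFull));
  assert(!IsLowContiguousMask(0));      // empty mask rejected
  assert(!IsLowContiguousMask(0xF0));   // contiguous, but not at bit 0
  assert(!IsLowContiguousMask(0x101));  // not contiguous
}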
+ Emit(kRiscvShl32, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node()), + g.UseImmediate(m.right().node())); + return; + } + } + } + } + VisitRRO(this, kRiscvShl32, node); +} + +void InstructionSelector::VisitWord32Shr(Node* node) { + VisitRRO(this, kRiscvShr32, node); +} + +void InstructionSelector::VisitWord32Sar(Node* node) { + Int32BinopMatcher m(node); + if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) { + Int32BinopMatcher mleft(m.left().node()); + if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) { + RiscvOperandGenerator g(this); + uint32_t sar = m.right().ResolvedValue(); + uint32_t shl = mleft.right().ResolvedValue(); + if ((sar == shl) && (sar == 16)) { + Emit(kRiscvSignExtendShort, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node())); + return; + } else if ((sar == shl) && (sar == 24)) { + Emit(kRiscvSignExtendByte, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node())); + return; + } else if ((sar == shl) && (sar == 32)) { + Emit(kRiscvShl32, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node()), g.TempImmediate(0)); + return; + } + } + } + VisitRRO(this, kRiscvSar32, node); +} + +void InstructionSelector::VisitWord64Shl(Node* node) { + RiscvOperandGenerator g(this); + Int64BinopMatcher m(node); + if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) && + m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) { + // There's no need to sign/zero-extend to 64-bit if we shift out the upper + // 32 bits anyway. + Emit(kRiscvShl64, g.DefineSameAsFirst(node), + g.UseRegister(m.left().node()->InputAt(0)), + g.UseImmediate(m.right().node())); + return; + } + if (m.left().IsWord64And() && CanCover(node, m.left().node()) && + m.right().IsInRange(1, 63)) { + // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is + // contiguous, and the shift immediate non-zero. + Int64BinopMatcher mleft(m.left().node()); + if (mleft.right().HasResolvedValue()) { + uint64_t mask = mleft.right().ResolvedValue(); + uint32_t mask_width = base::bits::CountPopulation(mask); + uint32_t mask_msb = base::bits::CountLeadingZeros64(mask); + if ((mask_width != 0) && (mask_msb + mask_width == 64)) { + uint64_t shift = m.right().ResolvedValue(); + DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); + DCHECK_NE(0u, shift); + + if ((shift + mask_width) >= 64) { + // If the mask is contiguous and reaches or extends beyond the top + // bit, only the shift is needed. 
+ Emit(kRiscvShl64, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node()), + g.UseImmediate(m.right().node())); + return; + } + } + } + } + VisitRRO(this, kRiscvShl64, node); +} + +void InstructionSelector::VisitWord64Shr(Node* node) { + VisitRRO(this, kRiscvShr64, node); +} + +void InstructionSelector::VisitWord64Sar(Node* node) { + if (TryEmitExtendingLoad(this, node, node)) return; + VisitRRO(this, kRiscvSar64, node); +} + +void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); } + +void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); } + +void InstructionSelector::VisitWord32Ror(Node* node) { + VisitRRO(this, kRiscvRor32, node); +} + +void InstructionSelector::VisitWord32Clz(Node* node) { + VisitRR(this, kRiscvClz32, node); +} + +void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); } + +void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); } + +void InstructionSelector::VisitWord64ReverseBytes(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvByteSwap64, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitWord32ReverseBytes(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvByteSwap32, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + +void InstructionSelector::VisitWord32Ctz(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvCtz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitWord64Ctz(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvCtz64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitWord32Popcnt(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvPopcnt32, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitWord64Popcnt(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvPopcnt64, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitWord64Ror(Node* node) { + VisitRRO(this, kRiscvRor64, node); +} + +void InstructionSelector::VisitWord64Clz(Node* node) { + VisitRR(this, kRiscvClz64, node); +} + +void InstructionSelector::VisitInt32Add(Node* node) { + VisitBinop(this, node, kRiscvAdd32, true, kRiscvAdd32); +} + +void InstructionSelector::VisitInt64Add(Node* node) { + VisitBinop(this, node, kRiscvAdd64, true, kRiscvAdd64); +} + +void InstructionSelector::VisitInt32Sub(Node* node) { + VisitBinop(this, node, kRiscvSub32); +} + +void InstructionSelector::VisitInt64Sub(Node* node) { + VisitBinop(this, node, kRiscvSub64); +} + +void InstructionSelector::VisitInt32Mul(Node* node) { + RiscvOperandGenerator g(this); + Int32BinopMatcher m(node); + if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) { + uint32_t value = static_cast(m.right().ResolvedValue()); + if (base::bits::IsPowerOfTwo(value)) { + Emit(kRiscvShl32 | AddressingModeField::encode(kMode_None), + g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.TempImmediate(base::bits::WhichPowerOfTwo(value))); + return; + } + if (base::bits::IsPowerOfTwo(value + 1)) { + InstructionOperand temp = g.TempRegister(); + Emit(kRiscvShl32 | AddressingModeField::encode(kMode_None), temp, + g.UseRegister(m.left().node()), + g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1))); + Emit(kRiscvSub32 | AddressingModeField::encode(kMode_None), 
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node())); + return; + } + } + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + if (CanCover(node, left) && CanCover(node, right)) { + if (left->opcode() == IrOpcode::kWord64Sar && + right->opcode() == IrOpcode::kWord64Sar) { + Int64BinopMatcher leftInput(left), rightInput(right); + if (leftInput.right().Is(32) && rightInput.right().Is(32)) { + // Combine untagging shifts with Dmul high. + Emit(kRiscvMulHigh64, g.DefineSameAsFirst(node), + g.UseRegister(leftInput.left().node()), + g.UseRegister(rightInput.left().node())); + return; + } + } + } + VisitRRR(this, kRiscvMul32, node); +} + +void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) { + RiscvOperandGenerator g(this); + InstructionOperand src1 = g.TempSimd128Register(); + InstructionOperand src2 = g.TempSimd128Register(); + InstructionOperand src = g.UseUniqueRegister(node->InputAt(0)); + Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0006000400020000), + g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1))); + Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0007000500030001), + g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1))); + Emit(kRiscvVwadd, g.DefineAsRegister(node), src1, src2, + g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(mf2))); +} + +void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) { + RiscvOperandGenerator g(this); + InstructionOperand src1 = g.TempSimd128Register(); + InstructionOperand src2 = g.TempSimd128Register(); + InstructionOperand src = g.UseUniqueRegister(node->InputAt(0)); + Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0006000400020000), + g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1))); + Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0007000500030001), + g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1))); + Emit(kRiscvVwaddu, g.DefineAsRegister(node), src1, src2, + g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(mf2))); +} + +void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) { + RiscvOperandGenerator g(this); + InstructionOperand src1 = g.TempSimd128Register(); + InstructionOperand src2 = g.TempSimd128Register(); + InstructionOperand src = g.UseUniqueRegister(node->InputAt(0)); + Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0E0C0A0806040200), + g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1))); + Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0F0D0B0907050301), + g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1))); + Emit(kRiscvVwadd, g.DefineAsRegister(node), src1, src2, + g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(mf2))); +} + +void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) { + RiscvOperandGenerator g(this); + InstructionOperand src1 = g.TempSimd128Register(); + InstructionOperand src2 = g.TempSimd128Register(); + InstructionOperand src = g.UseUniqueRegister(node->InputAt(0)); + Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0E0C0A0806040200), + g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1))); + Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0F0D0B0907050301), + g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1))); + Emit(kRiscvVwaddu, g.DefineAsRegister(node), src1, src2, + g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(mf2))); +} + +void InstructionSelector::VisitInt32MulHigh(Node* node) { + VisitRRR(this, kRiscvMulHigh32, node); +} + +void InstructionSelector::VisitUint32MulHigh(Node* node) { + VisitRRR(this, kRiscvMulHighU32, node); +} + +void 
InstructionSelector::VisitInt64Mul(Node* node) { + RiscvOperandGenerator g(this); + Int64BinopMatcher m(node); + // TODO(dusmil): Add optimization for shifts larger than 32. + if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) { + uint64_t value = static_cast(m.right().ResolvedValue()); + if (base::bits::IsPowerOfTwo(value)) { + Emit(kRiscvShl64 | AddressingModeField::encode(kMode_None), + g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.TempImmediate(base::bits::WhichPowerOfTwo(value))); + return; + } + if (base::bits::IsPowerOfTwo(value + 1)) { + InstructionOperand temp = g.TempRegister(); + Emit(kRiscvShl64 | AddressingModeField::encode(kMode_None), temp, + g.UseRegister(m.left().node()), + g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1))); + Emit(kRiscvSub64 | AddressingModeField::encode(kMode_None), + g.DefineAsRegister(node), temp, g.UseRegister(m.left().node())); + return; + } + } + Emit(kRiscvMul64, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitInt32Div(Node* node) { + RiscvOperandGenerator g(this); + Int32BinopMatcher m(node); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + if (CanCover(node, left) && CanCover(node, right)) { + if (left->opcode() == IrOpcode::kWord64Sar && + right->opcode() == IrOpcode::kWord64Sar) { + Int64BinopMatcher rightInput(right), leftInput(left); + if (rightInput.right().Is(32) && leftInput.right().Is(32)) { + // Combine both shifted operands with Ddiv. + Emit(kRiscvDiv64, g.DefineSameAsFirst(node), + g.UseRegister(leftInput.left().node()), + g.UseRegister(rightInput.left().node())); + return; + } + } + } + Emit(kRiscvDiv32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitUint32Div(Node* node) { + RiscvOperandGenerator g(this); + Int32BinopMatcher m(node); + Emit(kRiscvDivU32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitInt32Mod(Node* node) { + RiscvOperandGenerator g(this); + Int32BinopMatcher m(node); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + if (CanCover(node, left) && CanCover(node, right)) { + if (left->opcode() == IrOpcode::kWord64Sar && + right->opcode() == IrOpcode::kWord64Sar) { + Int64BinopMatcher rightInput(right), leftInput(left); + if (rightInput.right().Is(32) && leftInput.right().Is(32)) { + // Combine both shifted operands with Dmod. 
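The multiply lowering in VisitInt32Mul and VisitInt64Mul above turns x * 2^k into a shift and x * (2^k - 1) into a shift plus subtract through a temp register. Both rewrites follow from identities on wrap-around arithmetic that are easy to check in isolation (hypothetical operand value, not part of the diff):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 123456789u;

  // x * 2^k == x << k (mod 2^32, which is what kRiscvShl32 produces).
  for (unsigned k = 0; k < 32; ++k) {
    assert(x * (uint32_t{1} << k) == (x << k));
  }

  // x * (2^k - 1) == (x << k) - x, the two-instruction sequence emitted above.
  for (unsigned k = 1; k < 32; ++k) {
    uint32_t m = (uint32_t{1} << k) - 1u;
    assert(x * m == (x << k) - x);
  }
}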
+ Emit(kRiscvMod64, g.DefineSameAsFirst(node), + g.UseRegister(leftInput.left().node()), + g.UseRegister(rightInput.left().node())); + return; + } + } + } + Emit(kRiscvMod32, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitUint32Mod(Node* node) { + RiscvOperandGenerator g(this); + Int32BinopMatcher m(node); + Emit(kRiscvModU32, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitInt64Div(Node* node) { + RiscvOperandGenerator g(this); + Int64BinopMatcher m(node); + Emit(kRiscvDiv64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitUint64Div(Node* node) { + RiscvOperandGenerator g(this); + Int64BinopMatcher m(node); + Emit(kRiscvDivU64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitInt64Mod(Node* node) { + RiscvOperandGenerator g(this); + Int64BinopMatcher m(node); + Emit(kRiscvMod64, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitUint64Mod(Node* node) { + RiscvOperandGenerator g(this); + Int64BinopMatcher m(node); + Emit(kRiscvModU64, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.UseRegister(m.right().node())); +} + +void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) { + VisitRR(this, kRiscvCvtDS, node); +} + +void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) { + VisitRR(this, kRiscvCvtSW, node); +} + +void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) { + VisitRR(this, kRiscvCvtSUw, node); +} + +void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { + VisitRR(this, kRiscvCvtDW, node); +} + +void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) { + VisitRR(this, kRiscvCvtDL, node); +} + +void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { + VisitRR(this, kRiscvCvtDUw, node); +} + +void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) { + RiscvOperandGenerator g(this); + InstructionCode opcode = kRiscvTruncWS; + TruncateKind kind = OpParameter(node->op()); + if (kind == TruncateKind::kSetOverflowToMin) { + opcode |= MiscField::encode(true); + } + Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) { + RiscvOperandGenerator g(this); + InstructionCode opcode = kRiscvTruncUwS; + TruncateKind kind = OpParameter(node->op()); + if (kind == TruncateKind::kSetOverflowToMin) { + opcode |= MiscField::encode(true); + } + Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) { + RiscvOperandGenerator g(this); + Node* value = node->InputAt(0); + // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction + // which does rounding and conversion to integer format. 
+ if (CanCover(node, value)) { + switch (value->opcode()) { + case IrOpcode::kFloat64RoundDown: + Emit(kRiscvFloorWD, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + case IrOpcode::kFloat64RoundUp: + Emit(kRiscvCeilWD, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + case IrOpcode::kFloat64RoundTiesEven: + Emit(kRiscvRoundWD, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + case IrOpcode::kFloat64RoundTruncate: + Emit(kRiscvTruncWD, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + default: + break; + } + if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) { + Node* next = value->InputAt(0); + if (CanCover(value, next)) { + // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP)) + switch (next->opcode()) { + case IrOpcode::kFloat32RoundDown: + Emit(kRiscvFloorWS, g.DefineAsRegister(node), + g.UseRegister(next->InputAt(0))); + return; + case IrOpcode::kFloat32RoundUp: + Emit(kRiscvCeilWS, g.DefineAsRegister(node), + g.UseRegister(next->InputAt(0))); + return; + case IrOpcode::kFloat32RoundTiesEven: + Emit(kRiscvRoundWS, g.DefineAsRegister(node), + g.UseRegister(next->InputAt(0))); + return; + case IrOpcode::kFloat32RoundTruncate: + Emit(kRiscvTruncWS, g.DefineAsRegister(node), + g.UseRegister(next->InputAt(0))); + return; + default: + Emit(kRiscvTruncWS, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + } + } else { + // Match float32 -> float64 -> int32 representation change path. + Emit(kRiscvTruncWS, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + } + } + } + VisitRR(this, kRiscvTruncWD, node); +} + +void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) { + VisitRR(this, kRiscvTruncLD, node); +} + +void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { + VisitRR(this, kRiscvTruncUwD, node); +} + +void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) { + VisitRR(this, kRiscvTruncUlD, node); +} + +void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) { + VisitRR(this, kRiscvTruncUwD, node); +} + +void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) { + RiscvOperandGenerator g(this); + InstructionCode opcode = kRiscvTruncLD; + TruncateKind kind = OpParameter(node->op()); + if (kind == TruncateKind::kSetOverflowToMin) { + opcode |= MiscField::encode(true); + } + Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) { + RiscvOperandGenerator g(this); + InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; + InstructionOperand outputs[2]; + size_t output_count = 0; + outputs[output_count++] = g.DefineAsRegister(node); + + Node* success_output = NodeProperties::FindProjection(node, 1); + if (success_output) { + outputs[output_count++] = g.DefineAsRegister(success_output); + } + + this->Emit(kRiscvTruncLS, output_count, outputs, 1, inputs); +} + +void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) { + RiscvOperandGenerator g(this); + InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; + InstructionOperand outputs[2]; + size_t output_count = 0; + outputs[output_count++] = g.DefineAsRegister(node); + + Node* success_output = NodeProperties::FindProjection(node, 1); + if (success_output) { + outputs[output_count++] = g.DefineAsRegister(success_output); + } + + Emit(kRiscvTruncLD, output_count, outputs, 1, inputs); +} + +void 
InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) { + RiscvOperandGenerator g(this); + InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; + InstructionOperand outputs[2]; + size_t output_count = 0; + outputs[output_count++] = g.DefineAsRegister(node); + + Node* success_output = NodeProperties::FindProjection(node, 1); + if (success_output) { + outputs[output_count++] = g.DefineAsRegister(success_output); + } + + Emit(kRiscvTruncUlS, output_count, outputs, 1, inputs); +} + +void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) { + RiscvOperandGenerator g(this); + + InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; + InstructionOperand outputs[2]; + size_t output_count = 0; + outputs[output_count++] = g.DefineAsRegister(node); + + Node* success_output = NodeProperties::FindProjection(node, 1); + if (success_output) { + outputs[output_count++] = g.DefineAsRegister(success_output); + } + + Emit(kRiscvTruncUlD, output_count, outputs, 1, inputs); +} + +void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) { + DCHECK(SmiValuesAre31Bits()); + DCHECK(COMPRESS_POINTERS_BOOL); + RiscvOperandGenerator g(this); + Emit(kRiscvZeroExtendWord, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + +void EmitSignExtendWord(InstructionSelector* selector, Node* node) { + RiscvOperandGenerator g(selector); + Node* value = node->InputAt(0); + IrOpcode::Value lastOpCode = value->opcode(); + if (lastOpCode == IrOpcode::kInt32Add || lastOpCode == IrOpcode::kInt32Sub || + lastOpCode == IrOpcode::kWord32And || lastOpCode == IrOpcode::kWord32Or || + lastOpCode == IrOpcode::kWord32Xor || + lastOpCode == IrOpcode::kWord32Shl || + lastOpCode == IrOpcode::kWord32Shr || + lastOpCode == IrOpcode::kWord32Sar || + lastOpCode == IrOpcode::kUint32Mod) { + selector->Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); + return; + } + if (lastOpCode == IrOpcode::kInt32Mul) { + Node* left = value->InputAt(0); + Node* right = value->InputAt(1); + if (selector->CanCover(value, left) && selector->CanCover(value, right)) { + if (left->opcode() == IrOpcode::kWord64Sar && + right->opcode() == IrOpcode::kWord64Sar) { + Int64BinopMatcher leftInput(left), rightInput(right); + if (leftInput.right().Is(32) && rightInput.right().Is(32)) { + selector->Emit(kRiscvSignExtendWord, g.DefineAsRegister(node), + g.UseRegister(value)); + return; + } + } + } + selector->Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); + return; + } + if (lastOpCode == IrOpcode::kInt32Mod) { + Node* left = value->InputAt(0); + Node* right = value->InputAt(1); + if (selector->CanCover(value, left) && selector->CanCover(value, right)) { + if (left->opcode() == IrOpcode::kWord64Sar && + right->opcode() == IrOpcode::kWord64Sar) { + Int64BinopMatcher rightInput(right), leftInput(left); + if (rightInput.right().Is(32) && leftInput.right().Is(32)) { + // Combine both shifted operands with Dmod. + selector->Emit(kRiscvSignExtendWord, g.DefineAsRegister(node), + g.UseRegister(value)); + return; + } + } + } + selector->Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); + return; + } + selector->Emit(kRiscvSignExtendWord, g.DefineAsRegister(node), + g.UseRegister(value)); +} + +void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { + Node* value = node->InputAt(0); + if ((value->opcode() == IrOpcode::kLoad || + value->opcode() == IrOpcode::kLoadImmutable) && + CanCover(node, value)) { + // Generate sign-extending load. 
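+    // E.g. ChangeInt32ToInt64(Load[Int8]) is selected as a single kRiscvLb,
+    // which already sign-extends the loaded byte to 64 bits, so no separate
+    // sign-extension instruction is needed (a sketch; the exact opcode is
+    // picked from the load representation below).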
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op()); + InstructionCode opcode = kArchNop; + switch (load_rep.representation()) { + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kWord8: + opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb; + break; + case MachineRepresentation::kWord16: + opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh; + break; + case MachineRepresentation::kWord32: + opcode = kRiscvLw; + break; + default: + UNREACHABLE(); + } + EmitLoad(this, value, opcode, node); + } else { + EmitSignExtendWord(this, node); + } +} + +bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) { + DCHECK_NE(node->opcode(), IrOpcode::kPhi); + if (node->opcode() == IrOpcode::kLoad || + node->opcode() == IrOpcode::kLoadImmutable) { + LoadRepresentation load_rep = LoadRepresentationOf(node->op()); + if (load_rep.IsUnsigned()) { + switch (load_rep.representation()) { + case MachineRepresentation::kWord8: + case MachineRepresentation::kWord16: + return true; + default: + return false; + } + } + } + + // All other 32-bit operations sign-extend to the upper 32 bits + return false; +} + +void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { + RiscvOperandGenerator g(this); + Node* value = node->InputAt(0); + if (ZeroExtendsWord32ToWord64(value)) { + Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); + return; + } + Emit(kRiscvZeroExtendWord, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { + RiscvOperandGenerator g(this); + Node* value = node->InputAt(0); + if (CanCover(node, value)) { + switch (value->opcode()) { + case IrOpcode::kWord64Sar: { + if (CanCover(value, value->InputAt(0)) && + TryEmitExtendingLoad(this, value, node)) { + return; + } else { + Int64BinopMatcher m(value); + if (m.right().IsInRange(32, 63)) { + // After smi untagging no need for truncate. Combine sequence. + Emit(kRiscvSar64, g.DefineSameAsFirst(node), + g.UseRegister(m.left().node()), + g.UseImmediate(m.right().node())); + return; + } + } + break; + } + default: + break; + } + } + + // Semantics of this machine IR is not clear. For example, x86 zero-extend the + // truncated value; arm treats it as nop thus the upper 32-bit as undefined; + // Riscv emits ext instruction which zero-extend the 32-bit value; for riscv, + // we do sign-extension of the truncated value + EmitSignExtendWord(this, node); +} + +void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { + RiscvOperandGenerator g(this); + Node* value = node->InputAt(0); + // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding + // instruction. 
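+  // E.g. TruncateFloat64ToFloat32(ChangeInt32ToFloat64(x)) is selected as a
+  // single kRiscvCvtSW (int32 -> float32), skipping the intermediate float64
+  // value entirely (a sketch of the pattern matched below).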
+ if (CanCover(node, value) && + value->opcode() == IrOpcode::kChangeInt32ToFloat64) { + Emit(kRiscvCvtSW, g.DefineAsRegister(node), + g.UseRegister(value->InputAt(0))); + return; + } + VisitRR(this, kRiscvCvtSD, node); +} + +void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) { + VisitRR(this, kArchTruncateDoubleToI, node); +} + +void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) { + VisitRR(this, kRiscvTruncWD, node); +} + +void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) { + VisitRR(this, kRiscvCvtSL, node); +} + +void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) { + VisitRR(this, kRiscvCvtDL, node); +} + +void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) { + VisitRR(this, kRiscvCvtSUl, node); +} + +void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) { + VisitRR(this, kRiscvCvtDUl, node); +} + +void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) { + VisitRR(this, kRiscvBitcastFloat32ToInt32, node); +} + +void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) { + VisitRR(this, kRiscvBitcastDL, node); +} + +void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) { + VisitRR(this, kRiscvBitcastInt32ToFloat32, node); +} + +void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) { + VisitRR(this, kRiscvBitcastLD, node); +} + +void InstructionSelector::VisitFloat32Add(Node* node) { + VisitRRR(this, kRiscvAddS, node); +} + +void InstructionSelector::VisitFloat64Add(Node* node) { + VisitRRR(this, kRiscvAddD, node); +} + +void InstructionSelector::VisitFloat32Sub(Node* node) { + VisitRRR(this, kRiscvSubS, node); +} + +void InstructionSelector::VisitFloat64Sub(Node* node) { + VisitRRR(this, kRiscvSubD, node); +} + +void InstructionSelector::VisitFloat32Mul(Node* node) { + VisitRRR(this, kRiscvMulS, node); +} + +void InstructionSelector::VisitFloat64Mul(Node* node) { + VisitRRR(this, kRiscvMulD, node); +} + +void InstructionSelector::VisitFloat32Div(Node* node) { + VisitRRR(this, kRiscvDivS, node); +} + +void InstructionSelector::VisitFloat64Div(Node* node) { + VisitRRR(this, kRiscvDivD, node); +} + +void InstructionSelector::VisitFloat64Mod(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvModD, g.DefineAsFixed(node, fa0), + g.UseFixed(node->InputAt(0), fa0), g.UseFixed(node->InputAt(1), fa1)) + ->MarkAsCall(); +} + +void InstructionSelector::VisitFloat32Max(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvFloat32Max, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); +} + +void InstructionSelector::VisitFloat64Max(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvFloat64Max, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); +} + +void InstructionSelector::VisitFloat32Min(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvFloat32Min, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); +} + +void InstructionSelector::VisitFloat64Min(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvFloat64Min, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); +} + +void InstructionSelector::VisitFloat32Abs(Node* node) { + VisitRR(this, kRiscvAbsS, node); +} + +void InstructionSelector::VisitFloat64Abs(Node* node) { + VisitRR(this, kRiscvAbsD, node); +} + +void InstructionSelector::VisitFloat32Sqrt(Node* node) { + VisitRR(this, kRiscvSqrtS, node); +} + +void 
InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  VisitRR(this, kRiscvSqrtD, node);
+}
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+  VisitRR(this, kRiscvFloat32RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+  VisitRR(this, kRiscvFloat64RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+  VisitRR(this, kRiscvFloat32RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+  VisitRR(this, kRiscvFloat64RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+  VisitRR(this, kRiscvFloat32RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  VisitRR(this, kRiscvFloat64RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  UNREACHABLE();
+}
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+  VisitRR(this, kRiscvFloat32RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+  VisitRR(this, kRiscvFloat64RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+  VisitRR(this, kRiscvNegS, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+  VisitRR(this, kRiscvNegD, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+                                                   InstructionCode opcode) {
+  RiscvOperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, fa0), g.UseFixed(node->InputAt(0), fa0),
+       g.UseFixed(node->InputAt(1), fa1))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+                                                  InstructionCode opcode) {
+  RiscvOperandGenerator g(this);
+  Emit(opcode, g.DefineAsFixed(node, fa0), g.UseFixed(node->InputAt(0), fa1))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::EmitPrepareArguments(
+    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
+    Node* node) {
+  RiscvOperandGenerator g(this);
+
+  // Prepare for C function call.
+  if (call_descriptor->IsCFunctionCall()) {
+    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+             call_descriptor->ParameterCount())),
+         0, nullptr, 0, nullptr);
+
+    // Poke any stack arguments.
+    int slot = kCArgSlotCount;
+    for (PushParameter input : (*arguments)) {
+      Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
+           g.TempImmediate(slot << kSystemPointerSizeLog2));
+      ++slot;
+    }
+  } else {
+    int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
+    if (push_count > 0) {
+      // Calculate needed space
+      int stack_size = 0;
+      for (PushParameter input : (*arguments)) {
+        if (input.node) {
+          stack_size += input.location.GetSizeInPointers();
+        }
+      }
+      Emit(kRiscvStackClaim, g.NoOutput(),
+           g.TempImmediate(stack_size << kSystemPointerSizeLog2));
+    }
+    for (size_t n = 0; n < arguments->size(); ++n) {
+      PushParameter input = (*arguments)[n];
+      if (input.node) {
+        Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
+             g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
+      }
+    }
+  }
+}
+
+void InstructionSelector::EmitPrepareResults(
+    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+    Node* node) {
+  RiscvOperandGenerator g(this);
+
+  int reverse_slot = 1;
+  for (PushParameter output : *results) {
+    if (!output.location.IsCallerFrameSlot()) continue;
+    // Skip any alignment holes in nodes.
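+    // (Roughly: a result returned in a caller frame slot is read back below
+    // with kRiscvPeek at the current reverse_slot, and reverse_slot then
+    // advances by the result's size in pointers.)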
+ if (output.node != nullptr) { + DCHECK(!call_descriptor->IsCFunctionCall()); + if (output.location.GetType() == MachineType::Float32()) { + MarkAsFloat32(output.node); + } else if (output.location.GetType() == MachineType::Float64()) { + MarkAsFloat64(output.node); + } + Emit(kRiscvPeek, g.DefineAsRegister(output.node), + g.UseImmediate(reverse_slot)); + } + reverse_slot += output.location.GetSizeInPointers(); + } +} + +bool InstructionSelector::IsTailCallAddressImmediate() { return false; } + +void InstructionSelector::VisitUnalignedLoad(Node* node) { + LoadRepresentation load_rep = LoadRepresentationOf(node->op()); + RiscvOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + ArchOpcode opcode; + switch (load_rep.representation()) { + case MachineRepresentation::kFloat32: + opcode = kRiscvULoadFloat; + break; + case MachineRepresentation::kFloat64: + opcode = kRiscvULoadDouble; + break; + case MachineRepresentation::kWord8: + opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb; + break; + case MachineRepresentation::kWord16: + opcode = load_rep.IsUnsigned() ? kRiscvUlhu : kRiscvUlh; + break; + case MachineRepresentation::kWord32: + opcode = kRiscvUlw; + break; + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: // Fall through. + case MachineRepresentation::kWord64: + opcode = kRiscvUld; + break; + case MachineRepresentation::kSimd128: + opcode = kRiscvRvvLd; + break; + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kCompressedPointer: // Fall through. + case MachineRepresentation::kCompressed: // Fall through. + case MachineRepresentation::kSandboxedPointer: + case MachineRepresentation::kMapWord: // Fall through. + case MachineRepresentation::kNone: + UNREACHABLE(); + } + + if (g.CanBeImmediate(index, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg, + g.UseRegister(index), g.UseRegister(base)); + // Emit desired load opcode, using temp addr_reg. + Emit(opcode | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); + } +} + +void InstructionSelector::VisitUnalignedStore(Node* node) { + RiscvOperandGenerator g(this); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op()); + ArchOpcode opcode; + switch (rep) { + case MachineRepresentation::kFloat32: + opcode = kRiscvUStoreFloat; + break; + case MachineRepresentation::kFloat64: + opcode = kRiscvUStoreDouble; + break; + case MachineRepresentation::kWord8: + opcode = kRiscvSb; + break; + case MachineRepresentation::kWord16: + opcode = kRiscvUsh; + break; + case MachineRepresentation::kWord32: + opcode = kRiscvUsw; + break; + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: // Fall through. + case MachineRepresentation::kWord64: + opcode = kRiscvUsd; + break; + case MachineRepresentation::kSimd128: + opcode = kRiscvRvvSt; + break; + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kCompressedPointer: // Fall through. 
+ case MachineRepresentation::kCompressed: // Fall through. + case MachineRepresentation::kSandboxedPointer: + case MachineRepresentation::kMapWord: // Fall through. + case MachineRepresentation::kNone: + UNREACHABLE(); + } + + if (g.CanBeImmediate(index, opcode)) { + Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + g.UseRegister(base), g.UseImmediate(index), + g.UseRegisterOrImmediateZero(value)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg, + g.UseRegister(index), g.UseRegister(base)); + // Emit desired store opcode, using temp addr_reg. + Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), + addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value)); + } +} + +namespace { + +// Shared routine for multiple compare operations. +static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, + InstructionOperand left, InstructionOperand right, + FlagsContinuation* cont) { + selector->EmitWithContinuation(opcode, left, right, cont); +} + +// Shared routine for multiple compare operations. +static void VisitWordCompareZero(InstructionSelector* selector, + InstructionOperand value, + FlagsContinuation* cont) { + selector->EmitWithContinuation(kRiscvCmpZero, value, cont); +} + +// Shared routine for multiple float32 compare operations. +void VisitFloat32Compare(InstructionSelector* selector, Node* node, + FlagsContinuation* cont) { + RiscvOperandGenerator g(selector); + Float32BinopMatcher m(node); + InstructionOperand lhs, rhs; + + lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) + : g.UseRegister(m.left().node()); + rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) + : g.UseRegister(m.right().node()); + VisitCompare(selector, kRiscvCmpS, lhs, rhs, cont); +} + +// Shared routine for multiple float64 compare operations. +void VisitFloat64Compare(InstructionSelector* selector, Node* node, + FlagsContinuation* cont) { + RiscvOperandGenerator g(selector); + Float64BinopMatcher m(node); + InstructionOperand lhs, rhs; + + lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) + : g.UseRegister(m.left().node()); + rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) + : g.UseRegister(m.right().node()); + VisitCompare(selector, kRiscvCmpD, lhs, rhs, cont); +} + +// Shared routine for multiple word compare operations. +void VisitWordCompare(InstructionSelector* selector, Node* node, + InstructionCode opcode, FlagsContinuation* cont, + bool commutative) { + RiscvOperandGenerator g(selector); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + // If one of the two inputs is an immediate, make sure it's on the right. + if (!g.CanBeImmediate(right, opcode) && g.CanBeImmediate(left, opcode)) { + cont->Commute(); + std::swap(left, right); + } + // Match immediates on right side of comparison. 
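+  // E.g. for Word32Equal(42, x) the operands were swapped (and the
+  // continuation commuted) above, so the constant can be matched here as the
+  // immediate operand of the comparison (a sketch; the actual check is
+  // g.CanBeImmediate below).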
+ if (g.CanBeImmediate(right, opcode)) { + if (opcode == kRiscvTst) { + VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), + cont); + } else { + switch (cont->condition()) { + case kEqual: + case kNotEqual: + if (cont->IsSet()) { + VisitCompare(selector, opcode, g.UseRegister(left), + g.UseImmediate(right), cont); + } else { + Int32BinopMatcher m(node, true); + NumberBinopMatcher n(node, true); + if (m.right().Is(0) || n.right().IsZero()) { + VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left), + cont); + } else { + VisitCompare(selector, opcode, g.UseRegister(left), + g.UseRegister(right), cont); + } + } + break; + case kSignedLessThan: + case kSignedGreaterThanOrEqual: + case kUnsignedLessThan: + case kUnsignedGreaterThanOrEqual: { + Int32BinopMatcher m(node, true); + if (m.right().Is(0)) { + VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left), + cont); + } else { + VisitCompare(selector, opcode, g.UseRegister(left), + g.UseImmediate(right), cont); + } + } break; + default: + Int32BinopMatcher m(node, true); + if (m.right().Is(0)) { + VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left), + cont); + } else { + VisitCompare(selector, opcode, g.UseRegister(left), + g.UseRegister(right), cont); + } + } + } + } else { + VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), + cont); + } +} +#ifndef V8_COMPRESS_POINTERS +bool IsNodeUnsigned(Node* n) { + NodeMatcher m(n); + + if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) { + LoadRepresentation load_rep = LoadRepresentationOf(n->op()); + return load_rep.IsUnsigned(); + } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) { + AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op()); + LoadRepresentation load_rep = atomic_load_params.representation(); + return load_rep.IsUnsigned(); + } else { + return m.IsUint32Div() || m.IsUint32LessThan() || + m.IsUint32LessThanOrEqual() || m.IsUint32Mod() || + m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() || + m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32(); + } +} +#endif + +// Shared routine for multiple word compare operations. 
+void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
+                            InstructionCode opcode, FlagsContinuation* cont) {
+  RiscvOperandGenerator g(selector);
+  InstructionOperand leftOp = g.TempRegister();
+  InstructionOperand rightOp = g.TempRegister();
+
+  selector->Emit(kRiscvShl64, leftOp, g.UseRegister(node->InputAt(0)),
+                 g.TempImmediate(32));
+  selector->Emit(kRiscvShl64, rightOp, g.UseRegister(node->InputAt(1)),
+                 g.TempImmediate(32));
+
+  VisitCompare(selector, opcode, leftOp, rightOp, cont);
+}
+
+#ifndef V8_COMPRESS_POINTERS
+void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
+                                 InstructionCode opcode,
+                                 FlagsContinuation* cont) {
+  if (FLAG_debug_code) {
+    RiscvOperandGenerator g(selector);
+    InstructionOperand leftOp = g.TempRegister();
+    InstructionOperand rightOp = g.TempRegister();
+    InstructionOperand optimizedResult = g.TempRegister();
+    InstructionOperand fullResult = g.TempRegister();
+    FlagsCondition condition = cont->condition();
+    InstructionCode testOpcode = opcode |
+                                 FlagsConditionField::encode(condition) |
+                                 FlagsModeField::encode(kFlags_set);
+
+    selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
+                   g.UseRegister(node->InputAt(1)));
+
+    selector->Emit(kRiscvShl64, leftOp, g.UseRegister(node->InputAt(0)),
+                   g.TempImmediate(32));
+    selector->Emit(kRiscvShl64, rightOp, g.UseRegister(node->InputAt(1)),
+                   g.TempImmediate(32));
+    selector->Emit(testOpcode, fullResult, leftOp, rightOp);
+
+    selector->Emit(kRiscvAssertEqual, g.NoOutput(), optimizedResult, fullResult,
+                   g.TempImmediate(static_cast<int>(
+                       AbortReason::kUnsupportedNonPrimitiveCompare)));
+  }
+
+  VisitWordCompare(selector, node, opcode, cont, false);
+}
+#endif
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  // RISC-V has no dedicated Word32 compare instructions. Instead, the
+  // selector relies on the values in registers being correctly sign-extended
+  // and uses a Word64 comparison. This is correct in most cases, but it does
+  // not work when a signed operand is compared with an unsigned one. We could
+  // simulate a full Word32 compare in all cases, but that would add
+  // unnecessary overhead, since unsigned integers are rarely used in
+  // JavaScript. The approach taken here is to match comparisons of a signed
+  // with an unsigned operand and perform a full Word32 compare only in those
+  // cases. This is not a complete solution, because it may miss cases where a
+  // full Word32 compare is needed, so it is essentially a heuristic.
+  // In addition, when the simulator calls a host function that returns an
+  // int32, the result is not sign-extended to int64 (the simulator cannot
+  // tell whether the function returns an int32 or an int64), so a full Word32
+  // compare is needed in that case as well.
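+  // A sketch of the problematic case: if one operand is produced by a signed
+  // operation and the other by an unsigned one (e.g. Uint32Div), their upper
+  // 32 bits may differ (sign- vs. zero-extended), so a plain 64-bit compare
+  // could give the wrong result; VisitFullWord32Compare shifts both operands
+  // left by 32 first so that only the low 32 bits take part in the compare.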
+#ifndef V8_COMPRESS_POINTERS +#ifndef USE_SIMULATOR + if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) { +#else + if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) || + node->InputAt(0)->opcode() == IrOpcode::kCall || + node->InputAt(1)->opcode() == IrOpcode::kCall) { +#endif + VisitFullWord32Compare(selector, node, kRiscvCmp, cont); + } else { + VisitOptimizedWord32Compare(selector, node, kRiscvCmp, cont); + } +#else + VisitFullWord32Compare(selector, node, kRiscvCmp, cont); +#endif +} + +void VisitWord64Compare(InstructionSelector* selector, Node* node, + FlagsContinuation* cont) { + VisitWordCompare(selector, node, kRiscvCmp, cont, false); +} + +void EmitWordCompareZero(InstructionSelector* selector, Node* value, + FlagsContinuation* cont) { + RiscvOperandGenerator g(selector); + selector->EmitWithContinuation(kRiscvCmpZero, + g.UseRegisterOrImmediateZero(value), cont); +} + +void VisitAtomicLoad(InstructionSelector* selector, Node* node, + ArchOpcode opcode, AtomicWidth width) { + RiscvOperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + if (g.CanBeImmediate(index, opcode)) { + selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) | + AtomicWidthField::encode(width), + g.DefineAsRegister(node), g.UseRegister(base), + g.UseImmediate(index)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), + addr_reg, g.UseRegister(index), g.UseRegister(base)); + // Emit desired load opcode, using temp addr_reg. + selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) | + AtomicWidthField::encode(width), + g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); + } +} + +void VisitAtomicStore(InstructionSelector* selector, Node* node, + ArchOpcode opcode, AtomicWidth width) { + RiscvOperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + if (g.CanBeImmediate(index, opcode)) { + selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) | + AtomicWidthField::encode(width), + g.NoOutput(), g.UseRegister(base), g.UseImmediate(index), + g.UseRegisterOrImmediateZero(value)); + } else { + InstructionOperand addr_reg = g.TempRegister(); + selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), + addr_reg, g.UseRegister(index), g.UseRegister(base)); + // Emit desired store opcode, using temp addr_reg. 
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) | + AtomicWidthField::encode(width), + g.NoOutput(), addr_reg, g.TempImmediate(0), + g.UseRegisterOrImmediateZero(value)); + } +} + +void VisitAtomicExchange(InstructionSelector* selector, Node* node, + ArchOpcode opcode, AtomicWidth width) { + RiscvOperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + AddressingMode addressing_mode = kMode_MRI; + InstructionOperand inputs[3]; + size_t input_count = 0; + inputs[input_count++] = g.UseUniqueRegister(base); + inputs[input_count++] = g.UseUniqueRegister(index); + inputs[input_count++] = g.UseUniqueRegister(value); + InstructionOperand outputs[1]; + outputs[0] = g.UseUniqueRegister(node); + InstructionOperand temp[3]; + temp[0] = g.TempRegister(); + temp[1] = g.TempRegister(); + temp[2] = g.TempRegister(); + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | + AtomicWidthField::encode(width); + selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); +} + +void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node, + ArchOpcode opcode, AtomicWidth width) { + RiscvOperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* old_value = node->InputAt(2); + Node* new_value = node->InputAt(3); + + AddressingMode addressing_mode = kMode_MRI; + InstructionOperand inputs[4]; + size_t input_count = 0; + inputs[input_count++] = g.UseUniqueRegister(base); + inputs[input_count++] = g.UseUniqueRegister(index); + inputs[input_count++] = g.UseUniqueRegister(old_value); + inputs[input_count++] = g.UseUniqueRegister(new_value); + InstructionOperand outputs[1]; + outputs[0] = g.UseUniqueRegister(node); + InstructionOperand temp[3]; + temp[0] = g.TempRegister(); + temp[1] = g.TempRegister(); + temp[2] = g.TempRegister(); + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | + AtomicWidthField::encode(width); + selector->Emit(code, 1, outputs, input_count, inputs, 3, temp); +} + +void VisitAtomicBinop(InstructionSelector* selector, Node* node, + ArchOpcode opcode, AtomicWidth width) { + RiscvOperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + + AddressingMode addressing_mode = kMode_MRI; + InstructionOperand inputs[3]; + size_t input_count = 0; + inputs[input_count++] = g.UseUniqueRegister(base); + inputs[input_count++] = g.UseUniqueRegister(index); + inputs[input_count++] = g.UseUniqueRegister(value); + InstructionOperand outputs[1]; + outputs[0] = g.UseUniqueRegister(node); + InstructionOperand temps[4]; + temps[0] = g.TempRegister(); + temps[1] = g.TempRegister(); + temps[2] = g.TempRegister(); + temps[3] = g.TempRegister(); + InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | + AtomicWidthField::encode(width); + selector->Emit(code, 1, outputs, input_count, inputs, 4, temps); +} + +} // namespace + +void InstructionSelector::VisitStackPointerGreaterThan( + Node* node, FlagsContinuation* cont) { + StackCheckKind kind = StackCheckKindOf(node->op()); + InstructionCode opcode = + kArchStackPointerGreaterThan | MiscField::encode(static_cast(kind)); + + RiscvOperandGenerator g(this); + + // No outputs. + InstructionOperand* const outputs = nullptr; + const int output_count = 0; + + // Applying an offset to this stack check requires a temp register. 
Offsets + // are only applied to the first stack check. If applying an offset, we must + // ensure the input and temp registers do not alias, thus kUniqueRegister. + InstructionOperand temps[] = {g.TempRegister()}; + const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0); + const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry) + ? OperandGenerator::kUniqueRegister + : OperandGenerator::kRegister; + + Node* const value = node->InputAt(0); + InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)}; + static constexpr int input_count = arraysize(inputs); + + EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, + temp_count, temps, cont); +} + +// Shared routine for word comparisons against zero. +void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, + FlagsContinuation* cont) { + // Try to combine with comparisons against 0 by simply inverting the branch. + while (CanCover(user, value)) { + if (value->opcode() == IrOpcode::kWord32Equal) { + Int32BinopMatcher m(value); + if (!m.right().Is(0)) break; + user = value; + value = m.left().node(); + } else if (value->opcode() == IrOpcode::kWord64Equal) { + Int64BinopMatcher m(value); + if (!m.right().Is(0)) break; + user = value; + value = m.left().node(); + } else { + break; + } + + cont->Negate(); + } + + if (CanCover(user, value)) { + switch (value->opcode()) { + case IrOpcode::kWord32Equal: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitWord32Compare(this, value, cont); + case IrOpcode::kInt32LessThan: + cont->OverwriteAndNegateIfEqual(kSignedLessThan); + return VisitWord32Compare(this, value, cont); + case IrOpcode::kInt32LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); + return VisitWord32Compare(this, value, cont); + case IrOpcode::kUint32LessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitWord32Compare(this, value, cont); + case IrOpcode::kUint32LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitWord32Compare(this, value, cont); + case IrOpcode::kWord64Equal: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitWord64Compare(this, value, cont); + case IrOpcode::kInt64LessThan: + cont->OverwriteAndNegateIfEqual(kSignedLessThan); + return VisitWord64Compare(this, value, cont); + case IrOpcode::kInt64LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); + return VisitWord64Compare(this, value, cont); + case IrOpcode::kUint64LessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitWord64Compare(this, value, cont); + case IrOpcode::kUint64LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitWord64Compare(this, value, cont); + case IrOpcode::kFloat32Equal: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitFloat32Compare(this, value, cont); + case IrOpcode::kFloat32LessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitFloat32Compare(this, value, cont); + case IrOpcode::kFloat32LessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitFloat32Compare(this, value, cont); + case IrOpcode::kFloat64Equal: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitFloat64Compare(this, value, cont); + case IrOpcode::kFloat64LessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitFloat64Compare(this, value, cont); + case IrOpcode::kFloat64LessThanOrEqual: + 
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitFloat64Compare(this, value, cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (ProjectionIndexOf(value->op()) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation> is either nullptr, which means there's no use of the
+          // actual value, or was already defined, which means it is scheduled
+          // *AFTER* this branch).
+          Node* const node = value->InputAt(0);
+          Node* const result = NodeProperties::FindProjection(node, 0);
+          if (result == nullptr || IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(this, node, kRiscvAdd64, cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(this, node, kRiscvSub64, cont);
+              case IrOpcode::kInt32MulWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(this, node, kRiscvMulOvf32, cont);
+              case IrOpcode::kInt64AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(this, node, kRiscvAddOvf64, cont);
+              case IrOpcode::kInt64SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(this, node, kRiscvSubOvf64, cont);
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kWord32And:
+      case IrOpcode::kWord64And:
+        return VisitWordCompare(this, value, kRiscvTst, cont, true);
+      case IrOpcode::kStackPointerGreaterThan:
+        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+        return VisitStackPointerGreaterThan(value, cont);
+      default:
+        break;
+    }
+  }
+
+  // Continuation could not be combined with a compare, emit compare against 0.
+  EmitWordCompareZero(this, value, cont);
+}
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+  RiscvOperandGenerator g(this);
+  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
+  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+    static const size_t kMaxTableSwitchValueRange = 2 << 16;
+    size_t table_space_cost = 10 + 2 * sw.value_range();
+    size_t table_time_cost = 3;
+    size_t lookup_space_cost = 2 + 2 * sw.case_count();
+    size_t lookup_time_cost = sw.case_count();
+    if (sw.case_count() > 0 &&
+        table_space_cost + 3 * table_time_cost <=
+            lookup_space_cost + 3 * lookup_time_cost &&
+        sw.min_value() > std::numeric_limits<int32_t>::min() &&
+        sw.value_range() <= kMaxTableSwitchValueRange) {
+      InstructionOperand index_operand = value_operand;
+      if (sw.min_value()) {
+        index_operand = g.TempRegister();
+        Emit(kRiscvSub32, index_operand, value_operand,
+             g.TempImmediate(sw.min_value()));
+      }
+      // Generate a table lookup.
+      return EmitTableSwitch(sw, index_operand);
+    }
+  }
+
+  // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand); +} + +void InstructionSelector::VisitWord32Equal(Node* const node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); + Int32BinopMatcher m(node); + if (m.right().Is(0)) { + return VisitWordCompareZero(m.node(), m.left().node(), &cont); + } + + VisitWord32Compare(this, node, &cont); +} + +void InstructionSelector::VisitInt32LessThan(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); + VisitWord32Compare(this, node, &cont); +} + +void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) { + FlagsContinuation cont = + FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); + VisitWord32Compare(this, node, &cont); +} + +void InstructionSelector::VisitUint32LessThan(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); + VisitWord32Compare(this, node, &cont); +} + +void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { + FlagsContinuation cont = + FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); + VisitWord32Compare(this, node, &cont); +} + +void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { + if (Node* ovf = NodeProperties::FindProjection(node, 1)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kRiscvAdd64, &cont); + } + FlagsContinuation cont; + VisitBinop(this, node, kRiscvAdd64, &cont); +} + +void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { + if (Node* ovf = NodeProperties::FindProjection(node, 1)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kRiscvSub64, &cont); + } + FlagsContinuation cont; + VisitBinop(this, node, kRiscvSub64, &cont); +} + +void InstructionSelector::VisitInt32MulWithOverflow(Node* node) { + if (Node* ovf = NodeProperties::FindProjection(node, 1)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kRiscvMulOvf32, &cont); + } + FlagsContinuation cont; + VisitBinop(this, node, kRiscvMulOvf32, &cont); +} + +void InstructionSelector::VisitInt64AddWithOverflow(Node* node) { + if (Node* ovf = NodeProperties::FindProjection(node, 1)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kRiscvAddOvf64, &cont); + } + FlagsContinuation cont; + VisitBinop(this, node, kRiscvAddOvf64, &cont); +} + +void InstructionSelector::VisitInt64SubWithOverflow(Node* node) { + if (Node* ovf = NodeProperties::FindProjection(node, 1)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kRiscvSubOvf64, &cont); + } + FlagsContinuation cont; + VisitBinop(this, node, kRiscvSubOvf64, &cont); +} + +void InstructionSelector::VisitWord64Equal(Node* const node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); + Int64BinopMatcher m(node); + if (m.right().Is(0)) { + return VisitWordCompareZero(m.node(), m.left().node(), &cont); + } + + VisitWord64Compare(this, node, &cont); +} + +void InstructionSelector::VisitInt64LessThan(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); + VisitWord64Compare(this, node, &cont); +} + +void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { + FlagsContinuation cont = + FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); + VisitWord64Compare(this, node, &cont); +} + +void InstructionSelector::VisitUint64LessThan(Node* node) { 
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); + VisitWord64Compare(this, node, &cont); +} + +void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) { + FlagsContinuation cont = + FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); + VisitWord64Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat32Equal(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); + VisitFloat32Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat32LessThan(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); + VisitFloat32Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) { + FlagsContinuation cont = + FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); + VisitFloat32Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat64Equal(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); + VisitFloat64Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat64LessThan(Node* node) { + FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); + VisitFloat64Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { + FlagsContinuation cont = + FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); + VisitFloat64Compare(this, node, &cont); +} + +void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) { + VisitRR(this, kRiscvFloat64ExtractLowWord32, node); +} + +void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) { + VisitRR(this, kRiscvFloat64ExtractHighWord32, node); +} + +void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { + VisitRR(this, kRiscvFloat64SilenceNaN, node); +} + +void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) { + RiscvOperandGenerator g(this); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + Emit(kRiscvFloat64InsertLowWord32, g.DefineSameAsFirst(node), + g.UseRegister(left), g.UseRegister(right)); +} + +void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { + RiscvOperandGenerator g(this); + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + Emit(kRiscvFloat64InsertHighWord32, g.DefineSameAsFirst(node), + g.UseRegister(left), g.UseRegister(right)); +} + +void InstructionSelector::VisitMemoryBarrier(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvSync, g.NoOutput()); +} + +void InstructionSelector::VisitWord32AtomicLoad(Node* node) { + AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op()); + LoadRepresentation load_rep = atomic_load_params.representation(); + ArchOpcode opcode; + switch (load_rep.representation()) { + case MachineRepresentation::kWord8: + opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8; + break; + case MachineRepresentation::kWord16: + opcode = load_rep.IsSigned() ? 
kAtomicLoadInt16 : kAtomicLoadUint16; + break; + case MachineRepresentation::kWord32: + opcode = kAtomicLoadWord32; + break; + default: + UNREACHABLE(); + } + VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32); +} + +void InstructionSelector::VisitWord32AtomicStore(Node* node) { + AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op()); + MachineRepresentation rep = store_params.representation(); + ArchOpcode opcode; + switch (rep) { + case MachineRepresentation::kWord8: + opcode = kAtomicStoreWord8; + break; + case MachineRepresentation::kWord16: + opcode = kAtomicStoreWord16; + break; + case MachineRepresentation::kWord32: + opcode = kAtomicStoreWord32; + break; + default: + UNREACHABLE(); + } + + VisitAtomicStore(this, node, opcode, AtomicWidth::kWord32); +} + +void InstructionSelector::VisitWord64AtomicLoad(Node* node) { + AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op()); + LoadRepresentation load_rep = atomic_load_params.representation(); + ArchOpcode opcode; + switch (load_rep.representation()) { + case MachineRepresentation::kWord8: + opcode = kAtomicLoadUint8; + break; + case MachineRepresentation::kWord16: + opcode = kAtomicLoadUint16; + break; + case MachineRepresentation::kWord32: + opcode = kAtomicLoadWord32; + break; + case MachineRepresentation::kWord64: + opcode = kRiscvWord64AtomicLoadUint64; + break; +#ifdef V8_COMPRESS_POINTERS + case MachineRepresentation::kTaggedSigned: + opcode = kRiscv64LdDecompressTaggedSigned; + break; + case MachineRepresentation::kTaggedPointer: + opcode = kRiscv64LdDecompressTaggedPointer; + break; + case MachineRepresentation::kTagged: + opcode = kRiscv64LdDecompressAnyTagged; + break; +#else + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: + if (kTaggedSize == 8) { + opcode = kRiscvWord64AtomicLoadUint64; + } else { + opcode = kAtomicLoadWord32; + } + break; +#endif + case MachineRepresentation::kCompressedPointer: // Fall through. + case MachineRepresentation::kCompressed: + DCHECK(COMPRESS_POINTERS_BOOL); + opcode = kAtomicLoadWord32; + break; + default: + UNREACHABLE(); + } + VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord64); +} + +void InstructionSelector::VisitWord64AtomicStore(Node* node) { + AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op()); + MachineRepresentation rep = store_params.representation(); + ArchOpcode opcode; + switch (rep) { + case MachineRepresentation::kWord8: + opcode = kAtomicStoreWord8; + break; + case MachineRepresentation::kWord16: + opcode = kAtomicStoreWord16; + break; + case MachineRepresentation::kWord32: + opcode = kAtomicStoreWord32; + break; + case MachineRepresentation::kWord64: + opcode = kRiscvWord64AtomicStoreWord64; + break; + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: + opcode = kRiscvWord64AtomicStoreWord64; + break; + case MachineRepresentation::kCompressedPointer: // Fall through. 
+ case MachineRepresentation::kCompressed: + CHECK(COMPRESS_POINTERS_BOOL); + opcode = kAtomicStoreWord32; + break; + default: + UNREACHABLE(); + } + + VisitAtomicStore(this, node, opcode, AtomicWidth::kWord64); +} + +void InstructionSelector::VisitWord32AtomicExchange(Node* node) { + ArchOpcode opcode; + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Int8()) { + opcode = kAtomicExchangeInt8; + } else if (type == MachineType::Uint8()) { + opcode = kAtomicExchangeUint8; + } else if (type == MachineType::Int16()) { + opcode = kAtomicExchangeInt16; + } else if (type == MachineType::Uint16()) { + opcode = kAtomicExchangeUint16; + } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { + opcode = kAtomicExchangeWord32; + } else { + UNREACHABLE(); + } + + VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32); +} + +void InstructionSelector::VisitWord64AtomicExchange(Node* node) { + ArchOpcode opcode; + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Uint8()) { + opcode = kAtomicExchangeUint8; + } else if (type == MachineType::Uint16()) { + opcode = kAtomicExchangeUint16; + } else if (type == MachineType::Uint32()) { + opcode = kAtomicExchangeWord32; + } else if (type == MachineType::Uint64()) { + opcode = kRiscvWord64AtomicExchangeUint64; + } else { + UNREACHABLE(); + } + VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64); +} + +void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) { + ArchOpcode opcode; + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Int8()) { + opcode = kAtomicCompareExchangeInt8; + } else if (type == MachineType::Uint8()) { + opcode = kAtomicCompareExchangeUint8; + } else if (type == MachineType::Int16()) { + opcode = kAtomicCompareExchangeInt16; + } else if (type == MachineType::Uint16()) { + opcode = kAtomicCompareExchangeUint16; + } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { + opcode = kAtomicCompareExchangeWord32; + } else { + UNREACHABLE(); + } + + VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32); +} + +void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { + ArchOpcode opcode; + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Uint8()) { + opcode = kAtomicCompareExchangeUint8; + } else if (type == MachineType::Uint16()) { + opcode = kAtomicCompareExchangeUint16; + } else if (type == MachineType::Uint32()) { + opcode = kAtomicCompareExchangeWord32; + } else if (type == MachineType::Uint64()) { + opcode = kRiscvWord64AtomicCompareExchangeUint64; + } else { + UNREACHABLE(); + } + VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64); +} +void InstructionSelector::VisitWord32AtomicBinaryOperation( + Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, + ArchOpcode uint16_op, ArchOpcode word32_op) { + ArchOpcode opcode; + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Int8()) { + opcode = int8_op; + } else if (type == MachineType::Uint8()) { + opcode = uint8_op; + } else if (type == MachineType::Int16()) { + opcode = int16_op; + } else if (type == MachineType::Uint16()) { + opcode = uint16_op; + } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { + opcode = word32_op; + } else { + UNREACHABLE(); + } + + VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32); +} + +#define VISIT_ATOMIC_BINOP(op) \ + void InstructionSelector::VisitWord32Atomic##op(Node* node) { \ + 
VisitWord32AtomicBinaryOperation( \ + node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \ + kAtomic##op##Uint16, kAtomic##op##Word32); \ + } +VISIT_ATOMIC_BINOP(Add) +VISIT_ATOMIC_BINOP(Sub) +VISIT_ATOMIC_BINOP(And) +VISIT_ATOMIC_BINOP(Or) +VISIT_ATOMIC_BINOP(Xor) +#undef VISIT_ATOMIC_BINOP + +void InstructionSelector::VisitWord64AtomicBinaryOperation( + Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op, + ArchOpcode uint64_op) { + ArchOpcode opcode; + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Uint8()) { + opcode = uint8_op; + } else if (type == MachineType::Uint16()) { + opcode = uint16_op; + } else if (type == MachineType::Uint32()) { + opcode = uint32_op; + } else if (type == MachineType::Uint64()) { + opcode = uint64_op; + } else { + UNREACHABLE(); + } + VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64); +} + +#define VISIT_ATOMIC_BINOP(op) \ + void InstructionSelector::VisitWord64Atomic##op(Node* node) { \ + VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \ + kAtomic##op##Uint16, kAtomic##op##Word32, \ + kRiscvWord64Atomic##op##Uint64); \ + } +VISIT_ATOMIC_BINOP(Add) +VISIT_ATOMIC_BINOP(Sub) +VISIT_ATOMIC_BINOP(And) +VISIT_ATOMIC_BINOP(Or) +VISIT_ATOMIC_BINOP(Xor) +#undef VISIT_ATOMIC_BINOP + +void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { + UNREACHABLE(); +} + +void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { + UNREACHABLE(); +} + +#define SIMD_TYPE_LIST(V) \ + V(F32x4) \ + V(I64x2) \ + V(I32x4) \ + V(I16x8) \ + V(I8x16) + +#define SIMD_UNOP_LIST(V) \ + V(F64x2Abs, kRiscvF64x2Abs) \ + V(F64x2Neg, kRiscvF64x2Neg) \ + V(F64x2Sqrt, kRiscvF64x2Sqrt) \ + V(F64x2ConvertLowI32x4S, kRiscvF64x2ConvertLowI32x4S) \ + V(F64x2ConvertLowI32x4U, kRiscvF64x2ConvertLowI32x4U) \ + V(F64x2PromoteLowF32x4, kRiscvF64x2PromoteLowF32x4) \ + V(F64x2Ceil, kRiscvF64x2Ceil) \ + V(F64x2Floor, kRiscvF64x2Floor) \ + V(F64x2Trunc, kRiscvF64x2Trunc) \ + V(F64x2NearestInt, kRiscvF64x2NearestInt) \ + V(I64x2Neg, kRiscvI64x2Neg) \ + V(I64x2Abs, kRiscvI64x2Abs) \ + V(I64x2BitMask, kRiscvI64x2BitMask) \ + V(F32x4SConvertI32x4, kRiscvF32x4SConvertI32x4) \ + V(F32x4UConvertI32x4, kRiscvF32x4UConvertI32x4) \ + V(F32x4Abs, kRiscvF32x4Abs) \ + V(F32x4Neg, kRiscvF32x4Neg) \ + V(F32x4Sqrt, kRiscvF32x4Sqrt) \ + V(F32x4RecipApprox, kRiscvF32x4RecipApprox) \ + V(F32x4RecipSqrtApprox, kRiscvF32x4RecipSqrtApprox) \ + V(F32x4DemoteF64x2Zero, kRiscvF32x4DemoteF64x2Zero) \ + V(F32x4Ceil, kRiscvF32x4Ceil) \ + V(F32x4Floor, kRiscvF32x4Floor) \ + V(F32x4Trunc, kRiscvF32x4Trunc) \ + V(F32x4NearestInt, kRiscvF32x4NearestInt) \ + V(I32x4RelaxedTruncF32x4S, kRiscvI32x4SConvertF32x4) \ + V(I32x4RelaxedTruncF32x4U, kRiscvI32x4UConvertF32x4) \ + V(I32x4RelaxedTruncF64x2SZero, kRiscvI32x4TruncSatF64x2SZero) \ + V(I32x4RelaxedTruncF64x2UZero, kRiscvI32x4TruncSatF64x2UZero) \ + V(I64x2SConvertI32x4Low, kRiscvI64x2SConvertI32x4Low) \ + V(I64x2SConvertI32x4High, kRiscvI64x2SConvertI32x4High) \ + V(I64x2UConvertI32x4Low, kRiscvI64x2UConvertI32x4Low) \ + V(I64x2UConvertI32x4High, kRiscvI64x2UConvertI32x4High) \ + V(I32x4SConvertF32x4, kRiscvI32x4SConvertF32x4) \ + V(I32x4UConvertF32x4, kRiscvI32x4UConvertF32x4) \ + V(I32x4Neg, kRiscvI32x4Neg) \ + V(I32x4SConvertI16x8Low, kRiscvI32x4SConvertI16x8Low) \ + V(I32x4SConvertI16x8High, kRiscvI32x4SConvertI16x8High) \ + V(I32x4UConvertI16x8Low, kRiscvI32x4UConvertI16x8Low) \ + V(I32x4UConvertI16x8High, kRiscvI32x4UConvertI16x8High) \ + V(I32x4Abs, kRiscvI32x4Abs) \ + V(I32x4BitMask, 
kRiscvI32x4BitMask) \ + V(I32x4TruncSatF64x2SZero, kRiscvI32x4TruncSatF64x2SZero) \ + V(I32x4TruncSatF64x2UZero, kRiscvI32x4TruncSatF64x2UZero) \ + V(I16x8Neg, kRiscvI16x8Neg) \ + V(I16x8SConvertI8x16Low, kRiscvI16x8SConvertI8x16Low) \ + V(I16x8SConvertI8x16High, kRiscvI16x8SConvertI8x16High) \ + V(I16x8UConvertI8x16Low, kRiscvI16x8UConvertI8x16Low) \ + V(I16x8UConvertI8x16High, kRiscvI16x8UConvertI8x16High) \ + V(I16x8Abs, kRiscvI16x8Abs) \ + V(I16x8BitMask, kRiscvI16x8BitMask) \ + V(I8x16Neg, kRiscvI8x16Neg) \ + V(I8x16Abs, kRiscvI8x16Abs) \ + V(I8x16BitMask, kRiscvI8x16BitMask) \ + V(I8x16Popcnt, kRiscvI8x16Popcnt) \ + V(S128Not, kRiscvS128Not) \ + V(V128AnyTrue, kRiscvV128AnyTrue) \ + V(I32x4AllTrue, kRiscvI32x4AllTrue) \ + V(I16x8AllTrue, kRiscvI16x8AllTrue) \ + V(I8x16AllTrue, kRiscvI8x16AllTrue) \ + V(I64x2AllTrue, kRiscvI64x2AllTrue) + +#define SIMD_SHIFT_OP_LIST(V) \ + V(I64x2Shl) \ + V(I64x2ShrS) \ + V(I64x2ShrU) \ + V(I32x4Shl) \ + V(I32x4ShrS) \ + V(I32x4ShrU) \ + V(I16x8Shl) \ + V(I16x8ShrS) \ + V(I16x8ShrU) \ + V(I8x16Shl) \ + V(I8x16ShrS) \ + V(I8x16ShrU) + +#define SIMD_BINOP_LIST(V) \ + V(F64x2Add, kRiscvF64x2Add) \ + V(F64x2Sub, kRiscvF64x2Sub) \ + V(F64x2Mul, kRiscvF64x2Mul) \ + V(F64x2Div, kRiscvF64x2Div) \ + V(F64x2Min, kRiscvF64x2Min) \ + V(F64x2Max, kRiscvF64x2Max) \ + V(F64x2Eq, kRiscvF64x2Eq) \ + V(F64x2Ne, kRiscvF64x2Ne) \ + V(F64x2Lt, kRiscvF64x2Lt) \ + V(F64x2Le, kRiscvF64x2Le) \ + V(I64x2Eq, kRiscvI64x2Eq) \ + V(I64x2Ne, kRiscvI64x2Ne) \ + V(I64x2GtS, kRiscvI64x2GtS) \ + V(I64x2GeS, kRiscvI64x2GeS) \ + V(I64x2Add, kRiscvI64x2Add) \ + V(I64x2Sub, kRiscvI64x2Sub) \ + V(I64x2Mul, kRiscvI64x2Mul) \ + V(F32x4Add, kRiscvF32x4Add) \ + V(F32x4Sub, kRiscvF32x4Sub) \ + V(F32x4Mul, kRiscvF32x4Mul) \ + V(F32x4Div, kRiscvF32x4Div) \ + V(F32x4Max, kRiscvF32x4Max) \ + V(F32x4Min, kRiscvF32x4Min) \ + V(F32x4Eq, kRiscvF32x4Eq) \ + V(F32x4Ne, kRiscvF32x4Ne) \ + V(F32x4Lt, kRiscvF32x4Lt) \ + V(F32x4Le, kRiscvF32x4Le) \ + V(F32x4RelaxedMin, kRiscvF32x4Min) \ + V(F32x4RelaxedMax, kRiscvF32x4Max) \ + V(F64x2RelaxedMin, kRiscvF64x2Min) \ + V(F64x2RelaxedMax, kRiscvF64x2Max) \ + V(I32x4Add, kRiscvI32x4Add) \ + V(I32x4Sub, kRiscvI32x4Sub) \ + V(I32x4Mul, kRiscvI32x4Mul) \ + V(I32x4MaxS, kRiscvI32x4MaxS) \ + V(I32x4MinS, kRiscvI32x4MinS) \ + V(I32x4MaxU, kRiscvI32x4MaxU) \ + V(I32x4MinU, kRiscvI32x4MinU) \ + V(I32x4Eq, kRiscvI32x4Eq) \ + V(I32x4Ne, kRiscvI32x4Ne) \ + V(I32x4GtS, kRiscvI32x4GtS) \ + V(I32x4GeS, kRiscvI32x4GeS) \ + V(I32x4GtU, kRiscvI32x4GtU) \ + V(I32x4GeU, kRiscvI32x4GeU) \ + V(I16x8Add, kRiscvI16x8Add) \ + V(I16x8AddSatS, kRiscvI16x8AddSatS) \ + V(I16x8AddSatU, kRiscvI16x8AddSatU) \ + V(I16x8Sub, kRiscvI16x8Sub) \ + V(I16x8SubSatS, kRiscvI16x8SubSatS) \ + V(I16x8SubSatU, kRiscvI16x8SubSatU) \ + V(I16x8Mul, kRiscvI16x8Mul) \ + V(I16x8MaxS, kRiscvI16x8MaxS) \ + V(I16x8MinS, kRiscvI16x8MinS) \ + V(I16x8MaxU, kRiscvI16x8MaxU) \ + V(I16x8MinU, kRiscvI16x8MinU) \ + V(I16x8Eq, kRiscvI16x8Eq) \ + V(I16x8Ne, kRiscvI16x8Ne) \ + V(I16x8GtS, kRiscvI16x8GtS) \ + V(I16x8GeS, kRiscvI16x8GeS) \ + V(I16x8GtU, kRiscvI16x8GtU) \ + V(I16x8GeU, kRiscvI16x8GeU) \ + V(I16x8RoundingAverageU, kRiscvI16x8RoundingAverageU) \ + V(I16x8Q15MulRSatS, kRiscvI16x8Q15MulRSatS) \ + V(I16x8SConvertI32x4, kRiscvI16x8SConvertI32x4) \ + V(I16x8UConvertI32x4, kRiscvI16x8UConvertI32x4) \ + V(I8x16Add, kRiscvI8x16Add) \ + V(I8x16AddSatS, kRiscvI8x16AddSatS) \ + V(I8x16AddSatU, kRiscvI8x16AddSatU) \ + V(I8x16Sub, kRiscvI8x16Sub) \ + V(I8x16SubSatS, kRiscvI8x16SubSatS) \ + V(I8x16SubSatU, kRiscvI8x16SubSatU) \ + 
V(I8x16MaxS, kRiscvI8x16MaxS) \ + V(I8x16MinS, kRiscvI8x16MinS) \ + V(I8x16MaxU, kRiscvI8x16MaxU) \ + V(I8x16MinU, kRiscvI8x16MinU) \ + V(I8x16Eq, kRiscvI8x16Eq) \ + V(I8x16Ne, kRiscvI8x16Ne) \ + V(I8x16GtS, kRiscvI8x16GtS) \ + V(I8x16GeS, kRiscvI8x16GeS) \ + V(I8x16GtU, kRiscvI8x16GtU) \ + V(I8x16GeU, kRiscvI8x16GeU) \ + V(I8x16RoundingAverageU, kRiscvI8x16RoundingAverageU) \ + V(I8x16SConvertI16x8, kRiscvI8x16SConvertI16x8) \ + V(I8x16UConvertI16x8, kRiscvI8x16UConvertI16x8) \ + V(S128And, kRiscvS128And) \ + V(S128Or, kRiscvS128Or) \ + V(S128Xor, kRiscvS128Xor) \ + V(S128AndNot, kRiscvS128AndNot) + +void InstructionSelector::VisitS128Const(Node* node) { + RiscvOperandGenerator g(this); + static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t); + uint32_t val[kUint32Immediates]; + memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size); + // If all bytes are zeros or ones, avoid emitting code for generic constants + bool all_zeros = !(val[0] || val[1] || val[2] || val[3]); + bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX && + val[2] == UINT32_MAX && val[3] == UINT32_MAX; + InstructionOperand dst = g.DefineAsRegister(node); + if (all_zeros) { + Emit(kRiscvS128Zero, dst); + } else if (all_ones) { + Emit(kRiscvS128AllOnes, dst); + } else { + Emit(kRiscvS128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]), + g.UseImmediate(val[2]), g.UseImmediate(val[3])); + } +} + +void InstructionSelector::VisitS128Zero(Node* node) { + RiscvOperandGenerator g(this); + Emit(kRiscvS128Zero, g.DefineAsRegister(node)); +} + +#define SIMD_VISIT_SPLAT(Type) \ + void InstructionSelector::Visit##Type##Splat(Node* node) { \ + VisitRR(this, kRiscv##Type##Splat, node); \ + } +SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) +SIMD_VISIT_SPLAT(F64x2) +#undef SIMD_VISIT_SPLAT + +#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \ + void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \ + VisitRRI(this, kRiscv##Type##ExtractLane##Sign, node); \ + } +SIMD_VISIT_EXTRACT_LANE(F64x2, ) +SIMD_VISIT_EXTRACT_LANE(F32x4, ) +SIMD_VISIT_EXTRACT_LANE(I32x4, ) +SIMD_VISIT_EXTRACT_LANE(I64x2, ) +SIMD_VISIT_EXTRACT_LANE(I16x8, U) +SIMD_VISIT_EXTRACT_LANE(I16x8, S) +SIMD_VISIT_EXTRACT_LANE(I8x16, U) +SIMD_VISIT_EXTRACT_LANE(I8x16, S) +#undef SIMD_VISIT_EXTRACT_LANE + +#define SIMD_VISIT_REPLACE_LANE(Type) \ + void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \ + VisitRRIR(this, kRiscv##Type##ReplaceLane, node); \ + } +SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE) +SIMD_VISIT_REPLACE_LANE(F64x2) +#undef SIMD_VISIT_REPLACE_LANE + +#define SIMD_VISIT_UNOP(Name, instruction) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitRR(this, instruction, node); \ + } +SIMD_UNOP_LIST(SIMD_VISIT_UNOP) +#undef SIMD_VISIT_UNOP + +#define SIMD_VISIT_SHIFT_OP(Name) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitSimdShift(this, kRiscv##Name, node); \ + } +SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP) +#undef SIMD_VISIT_SHIFT_OP + +#define SIMD_VISIT_BINOP(Name, instruction) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitRRR(this, instruction, node); \ + } +SIMD_BINOP_LIST(SIMD_VISIT_BINOP) +#undef SIMD_VISIT_BINOP + +void InstructionSelector::VisitS128Select(Node* node) { + VisitRRRR(this, kRiscvS128Select, node); +} + +#define SIMD_VISIT_SELECT_LANE(Name) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitRRRR(this, kRiscvS128Select, node); \ + } +SIMD_VISIT_SELECT_LANE(I8x16RelaxedLaneSelect) +SIMD_VISIT_SELECT_LANE(I16x8RelaxedLaneSelect) 
+SIMD_VISIT_SELECT_LANE(I32x4RelaxedLaneSelect) +SIMD_VISIT_SELECT_LANE(I64x2RelaxedLaneSelect) +#undef SIMD_VISIT_SELECT_LANE + +#define VISIT_SIMD_QFMOP(Name, instruction) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitRRRR(this, instruction, node); \ + } +VISIT_SIMD_QFMOP(F64x2Qfma, kRiscvF64x2Qfma) +VISIT_SIMD_QFMOP(F64x2Qfms, kRiscvF64x2Qfms) +VISIT_SIMD_QFMOP(F32x4Qfma, kRiscvF32x4Qfma) +VISIT_SIMD_QFMOP(F32x4Qfms, kRiscvF32x4Qfms) +#undef VISIT_SIMD_QFMOP + +void InstructionSelector::VisitI32x4DotI16x8S(Node* node) { + RiscvOperandGenerator g(this); + InstructionOperand temp = g.TempFpRegister(v16); + InstructionOperand temp1 = g.TempFpRegister(v14); + InstructionOperand temp2 = g.TempFpRegister(v30); + InstructionOperand dst = g.DefineAsRegister(node); + this->Emit(kRiscvVwmul, temp, g.UseRegister(node->InputAt(0)), + g.UseRegister(node->InputAt(1)), g.UseImmediate(E16), + g.UseImmediate(m1)); + this->Emit(kRiscvVcompress, temp2, temp, g.UseImmediate(0b01010101), + g.UseImmediate(E32), g.UseImmediate(m2)); + this->Emit(kRiscvVcompress, temp1, temp, g.UseImmediate(0b10101010), + g.UseImmediate(E32), g.UseImmediate(m2)); + this->Emit(kRiscvVaddVv, dst, temp1, temp2, g.UseImmediate(E32), + g.UseImmediate(m1)); +} + +namespace { + +struct ShuffleEntry { + uint8_t shuffle[kSimd128Size]; + ArchOpcode opcode; +}; + +// static const ShuffleEntry arch_shuffles[] = { +// {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}, +// kRiscvS32x4InterleaveRight}, +// {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}, +// kRiscvS32x4InterleaveLeft}, +// {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}, +// kRiscvS32x4PackEven}, +// {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}, +// kRiscvS32x4PackOdd}, +// {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}, +// kRiscvS32x4InterleaveEven}, +// {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}, +// kRiscvS32x4InterleaveOdd}, + +// {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}, +// kRiscvS16x8InterleaveRight}, +// {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}, +// kRiscvS16x8InterleaveLeft}, +// {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}, +// kRiscvS16x8PackEven}, +// {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}, +// kRiscvS16x8PackOdd}, +// {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}, +// kRiscvS16x8InterleaveEven}, +// {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}, +// kRiscvS16x8InterleaveOdd}, +// {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, +// kRiscvS16x4Reverse}, +// {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, +// kRiscvS16x2Reverse}, + +// {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}, +// kRiscvS8x16InterleaveRight}, +// {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}, +// kRiscvS8x16InterleaveLeft}, +// {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}, +// kRiscvS8x16PackEven}, +// {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}, +// kRiscvS8x16PackOdd}, +// {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}, +// kRiscvS8x16InterleaveEven}, +// {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}, +// kRiscvS8x16InterleaveOdd}, +// {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, +// kRiscvS8x8Reverse}, +// {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, +// kRiscvS8x4Reverse}, +// {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, +// 
kRiscvS8x2Reverse}};
+
+// bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+//                          size_t num_entries, bool is_swizzle,
+//                          ArchOpcode* opcode) {
+//   uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+//   for (size_t i = 0; i < num_entries; ++i) {
+//     const ShuffleEntry& entry = table[i];
+//     int j = 0;
+//     for (; j < kSimd128Size; ++j) {
+//       if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+//         break;
+//       }
+//     }
+//     if (j == kSimd128Size) {
+//       *opcode = entry.opcode;
+//       return true;
+//     }
+//   }
+//   return false;
+// }
+
+}  // namespace
+
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
+  uint8_t shuffle[kSimd128Size];
+  bool is_swizzle;
+  CanonicalizeShuffle(node, shuffle, &is_swizzle);
+  Node* input0 = node->InputAt(0);
+  Node* input1 = node->InputAt(1);
+  RiscvOperandGenerator g(this);
+  // uint8_t shuffle32x4[4];
+  // ArchOpcode opcode;
+  // if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+  //                         is_swizzle, &opcode)) {
+  //   VisitRRR(this, opcode, node);
+  //   return;
+  // }
+  // uint8_t offset;
+  // if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
+  //   Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+  //        g.UseRegister(input0), g.UseImmediate(offset));
+  //   return;
+  // }
+  // if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+  //   Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+  //        g.UseRegister(input1),
+  //        g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
+  //   return;
+  // }
+  Emit(kRiscvI8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+       g.UseRegister(input1),
+       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
+       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
+       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
+       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
+}
+
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
+  RiscvOperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempSimd128Register()};
+  // We don't want input 0 or input 1 to be the same as output, since we will
+  // modify output before doing the calculation.
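+  // RVV vrgather yields zero for out-of-range lane indices, which lines up
+  // with the Wasm swizzle requirement that selectors >= 16 produce zero.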
+  Emit(kRiscvVrgather, g.DefineAsRegister(node),
+       g.UseUniqueRegister(node->InputAt(0)),
+       g.UseUniqueRegister(node->InputAt(1)), g.UseImmediate(E8),
+       g.UseImmediate(m1), arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+  RiscvOperandGenerator g(this);
+  Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+  RiscvOperandGenerator g(this);
+  Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+  RiscvOperandGenerator g(this);
+  Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+  RiscvOperandGenerator g(this);
+  Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+  EmitSignExtendWord(this, node);
+}
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+  VisitUniqueRRR(this, kRiscvF32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+  VisitUniqueRRR(this, kRiscvF32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+  VisitUniqueRRR(this, kRiscvF64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+  VisitUniqueRRR(this, kRiscvF64x2Pmax, node);
+}
+
+#define VISIT_EXT_MUL(OPCODE1, OPCODE2, TYPE)                              \
+  void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2##S(         \
+      Node* node) {                                                        \
+    RiscvOperandGenerator g(this);                                         \
+    Emit(kRiscvVwmul, g.DefineAsRegister(node),                            \
+         g.UseUniqueRegister(node->InputAt(0)),                            \
+         g.UseUniqueRegister(node->InputAt(1)), g.UseImmediate(E##TYPE),   \
+         g.UseImmediate(mf2));                                             \
+  }                                                                        \
+  void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2##S(        \
+      Node* node) {                                                        \
+    RiscvOperandGenerator g(this);                                         \
+    InstructionOperand t1 = g.TempFpRegister(v16);                         \
+    Emit(kRiscvVslidedown, t1, g.UseUniqueRegister(node->InputAt(0)),      \
+         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),     \
+         g.UseImmediate(m1));                                              \
+    InstructionOperand t2 = g.TempFpRegister(v17);                         \
+    Emit(kRiscvVslidedown, t2, g.UseUniqueRegister(node->InputAt(1)),      \
+         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),     \
+         g.UseImmediate(m1));                                              \
+    Emit(kRiscvVwmul, g.DefineAsRegister(node), t1, t2,                    \
+         g.UseImmediate(E##TYPE), g.UseImmediate(mf2));                    \
+  }                                                                        \
+  void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2##U(         \
+      Node* node) {                                                        \
+    RiscvOperandGenerator g(this);                                         \
+    Emit(kRiscvVwmulu, g.DefineAsRegister(node),                           \
+         g.UseUniqueRegister(node->InputAt(0)),                            \
+         g.UseUniqueRegister(node->InputAt(1)), g.UseImmediate(E##TYPE),   \
+         g.UseImmediate(mf2));                                             \
+  }                                                                        \
+  void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2##U(        \
+      Node* node) {                                                        \
+    RiscvOperandGenerator g(this);                                         \
+    InstructionOperand t1 = g.TempFpRegister(v16);                         \
+    Emit(kRiscvVslidedown, t1, g.UseUniqueRegister(node->InputAt(0)),      \
+         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),     \
+         g.UseImmediate(m1));                                              \
+    InstructionOperand t2 = g.TempFpRegister(v17);                         \
+    Emit(kRiscvVslidedown, t2, g.UseUniqueRegister(node->InputAt(1)),      \
+         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),     \
+         g.UseImmediate(m1));                                              \
+    Emit(kRiscvVwmulu, g.DefineAsRegister(node), t1, t2,                   \
+         g.UseImmediate(E##TYPE), g.UseImmediate(mf2));                    \
+  }
+
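+// The Low variants above widen-multiply the low halves of the inputs
+// directly (kRiscvVwmul / kRiscvVwmulu at fractional LMUL mf2); the High
+// variants first use kRiscvVslidedown by kRvvVLEN / TYPE / 2 lanes to bring
+// the upper halves into place before the same widening multiply.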
+VISIT_EXT_MUL(I64x2, I32x4, 32)
+VISIT_EXT_MUL(I32x4, I16x8, 16)
+VISIT_EXT_MUL(I16x8, I8x16, 8)
+#undef VISIT_EXT_MUL
+
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+                                                        int first_input_index,
+                                                        Node* node) {
+  UNREACHABLE();
+}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
+  return flags | MachineOperatorBuilder::kWord32ShiftIsSafe |
+         MachineOperatorBuilder::kInt32DivIsSafe |
+         MachineOperatorBuilder::kUint32DivIsSafe |
+         MachineOperatorBuilder::kFloat64RoundDown |
+         MachineOperatorBuilder::kFloat32RoundDown |
+         MachineOperatorBuilder::kFloat64RoundUp |
+         MachineOperatorBuilder::kFloat32RoundUp |
+         MachineOperatorBuilder::kFloat64RoundTruncate |
+         MachineOperatorBuilder::kFloat32RoundTruncate |
+         MachineOperatorBuilder::kFloat64RoundTiesEven |
+         MachineOperatorBuilder::kFloat32RoundTiesEven;
+}
+
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+#ifdef RISCV_HAS_NO_UNALIGNED
+  return MachineOperatorBuilder::AlignmentRequirements::
+      NoUnalignedAccessSupport();
+#else
+  return MachineOperatorBuilder::AlignmentRequirements::
+      FullUnalignedAccessSupport();
+#endif
+}
+
+#undef SIMD_BINOP_LIST
+#undef SIMD_SHIFT_OP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_TYPE_LIST
+#undef TRACE_UNIMPL
+#undef TRACE
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
new file mode 100644
index 00000000000000..a4cae83e3b710d
--- /dev/null
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -0,0 +1,121 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/debug-type-profile.h"
+
+#include "src/execution/isolate.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
+  std::unique_ptr<TypeProfile> result(new TypeProfile());
+
+  // Feedback vectors are already listed to prevent losing them to GC.
+  DCHECK(isolate->factory()
+             ->feedback_vectors_for_profiling_tools()
+             ->IsArrayList());
+  Handle<ArrayList> list = Handle<ArrayList>::cast(
+      isolate->factory()->feedback_vectors_for_profiling_tools());
+
+  Script::Iterator scripts(isolate);
+
+  for (Script script = scripts.Next(); !script.is_null();
+       script = scripts.Next()) {
+    if (!script.IsUserJavaScript()) {
+      continue;
+    }
+
+    Handle