deps: update V8 to 11.3.244.4

PR-URL: https://github.com/nodejs/node/pull/47251
Reviewed-By: Yagiz Nizipli <yagiz@nizipli.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Rafael Gonzaga <rafael.nunu@hotmail.com>
Reviewed-By: Richard Lau <rlau@redhat.com>
This commit is contained in:
Michaël Zasso 2023-03-30 12:11:08 +02:00 committed by Node.js GitHub Bot
parent 10928cb0a4
commit f226350fcb
2560 changed files with 175089 additions and 108615 deletions

View file

@ -18,11 +18,106 @@
# - Because you must use a hash, you need to append to this list in a follow-up
# CL to the actual reformatting CL that you are trying to ignore.
# objects.h splitting
0604031eb1d01c52b6c1c9ae3012d80b23d74a68
09e405453359000d66cc0faaa102854e626bebeb
766ef168fbcac6bd0728cc2c9bb3ae7cbd74278a
b5a2839b927be04bdb50a236071c11764e4d6400
c911f91b5b6219e038c0117b05a8375bdf3db0b0
1bb48bf91ba8c887884a0fbd674c91f64964d8a5
19da9f24df7b59fec72b9dd8a979ad0ce5639c87
b090d7e7468236ffce0afdb55bb496bf0073f2ee
f40638d148b7a435522d5b714993908061e3b10d
e8a1c25f6afae9b77921abb70fad49da252eb6f0
6fa8283d0e031c9585d190f751907ed45bf85de0
9aa861c4bcfed612039259f93c2cd2b01337e99e
8175648018bd9f70af866f9fa433f1d79644d86b
c7b1ceb801ec7f639a093468d8e6424212cc197c
e39d2cbe1b1baa6513ddce2d73c981e335cc34fb
eda00a5c499b7a83479115eb275a816b8a2ed104
68deca9b418976ca8b3375e81058a9e0a815357f
0525e17847f39f80e3fd163021a58f68d8fcaf06
81a3c699d6eef936452ac3d10c7c59a2c1e38c0c
01452bedfca2b5447a7f62bda87edbbb76259a6e
1baf1050113a5418696839c273e05ea5ad1b5c4d
4b39fe3d608916b1cfea015de287511a1623fc7f
c6effdbba9b301244475553538f6eb1b3d9670b9
71e4c573199466ea4541e3d6b307c9b33d7bb785
efc92f0d4aa77bb90f5b56606b6f0d0819fba4af
a9db2c74b5bae2345ac52be404748954a3b5050d
0a01b6202226bbe99c0b83acf6c5a80344f5fb6a
a6c44361c8f2dc07b935e3f2bb3e0d3ad4f4a383
10d8aab1de430695a69e9d75af6ea42c2cdc9d6d
dd3c4fca2f0a2761b8b95cd47fcd62836d714890
e9c932233980866074025e65051003d1f298516c
2b1f79881c3f0b69bfb9274bda57ea50f7304982
7f031160d71a3d836667dc98288eaff4c94e6f56
490fabb4578f8a3c4096fdccff688c17ed5ed00d
d953b2ab726acca0b3abe90ce090a16d7ccc2ae3
bb514c426b9438cfb1149d219ac4ec2d8d1c8458
dfb453d713d8a05e76f720a6aae2871eec210276
b490fd66b873c89fca37b21eab58502b6367a864
9a71683d9c8ff9470eda6be5b2b11babac7b9863
37945f731c4d800ef788e3c32f8663773a93450e
b90c98fc29a8d896354de4a22c055f6d98376171
35f3e9d0e654e84646a0b98f29e4a2786cdca4b1
260eb5bb9b62ea3d5fa6ad0b0e8c2de75d48bad4
cc2c11441ce352360acce8638a19f58edf361f7d
7be0159e4b1e0b064e215ae4ced34d649cb2552e
95a7cfe0eaabbcff0f730ed60e1805779f6cfe41
8f54d18ba4ad10770e9537a2803459feccfe79a3
f44759d9ff52a3e5563e5f2bb23ee2c08222fcfd
09050c8a967f5f2956305e5d016b304d7bf5e669
c769745d5856a7eb3a0dbe6af5376c7638944364
a1547aa914aeedd7862f74124c18d2bbaf432c36
5f950698c0dc7c36b855961feb929022f74102fb
4aedeb1bd50c12ebcd6cf954c4cbef1205fff5ac
7366d8954cb1bd277d3283241da2fae62b886c48
bc35251f5e55a65c3a4acf7cba52cee505c86a46
4fb60b215801db70c694a799e735b64bfead59bb
03762b8488de0e393077e3f40fe7b63e675b3af3
a8a45d875f0a98b192cf0063ceda12aaf75ddfaf
a48e5ab8804e9e97b5ea577d6f2667bacee92eb2
# Update of quotations in DEPS file.
e50b49a0e38b34e2b28e026f4d1c7e0da0c7bb1a
# Rewrite code base to use "." instead of "->" to access Object members.
878ccb33bd3cf0e6dc018ff8d15843f585ac07be
# Splitting src/ into subfolders
632239011db501e76475d82ff6492f37fa8c1edc
f455f86d899716df3b9550950ce172f5b867619a
24a51e1eee4e286165dd0bba6afb4c35e8177a25
f9a88acbc928f0fc5e9a3acbcd3b4ece52355f3d
dec3298d9cfbe95759774a0e00302a08836b5f3d
a0c3797461810e3159662851e64946e17654236e
b72941e8b0d2843adf768442024d8950da798db1
4c986c625f19e35c95f3492c662822f4695218b4
0fa243af7096ee5b748b194476be2e4efecaec59
786ce26341b7ab11b4d42f1c77202530d5138ad2
a6eeea35cb7ff0c29b6cfdd1c786f382110241ce
be014256adea1552d4a044ef80616cdab6a7d549
93d3b7173fec7d010539057cdbd78d497f09fa9b
5bfe84a0dab60289b3470c080908ce83ac2212d4
a7695520556665ba73ab02c497ab73b162a5fb13
61523c45a335fe3be76498e0b16bf8e7aec0d058
bf372a73d8a5f4029fc9f4f69b675ef0cad80ada
8ad6b335376c6275ffb3361c662a1a45c853f4fc
06bf8261cf2c94fc071652652600b5790f719c05
81a0102fe8586071cc68e9595b26c5c1207ee5b3
5f28539599f6a6a265e18b8c897cc96ccbeec9c4
3253767622a784866dc34aeb7b5d0f02ebdff61e
9ac8b20086f95f1158a1901eefe12e25fd0333e4
3cb560adfe26edb586a0e6e655e5a7c4755cad1a
7bbd0bfe5161d57bcf268716ce4d1ce14d6786e6
c39cabbcbea26891558b81fd2236c38a7aeada08
a3187716d31a0ab9d7051adde6be9bd2b2c6fec1
# Move test/mjsunit/regress-*.js => test/mjsunit/regress/
cb67be1a3842fcf6a0da18aee444e3b7ea789e04
# [include] Split out v8.h
d1b27019d3bf86360ea838c317f8505fac6d3a7e
44fe02ced6e4c6b49d627807e3b3fd0edbbeb36e
ec06bb6ce5641cf65e400ec55b7421f87d04b999

3
deps/v8/.gitignore vendored
View file

@ -28,6 +28,7 @@
.clangd
.cpplint-cache
.cproject
.DS_Store
.gclient_entries
.gdb_history
.idea
@ -67,6 +68,7 @@
/third_party/jsoncpp/source
!/third_party/colorama
/third_party/colorama/src
!/third_party/glibc
!/third_party/googletest
/third_party/googletest/src/*
!/third_party/googletest/src/googletest
@ -79,6 +81,7 @@
!/third_party/test262-harness
!/third_party/v8
!/third_party/wasm-api
/tools/builtins-pgo/profiles/*.profile
/tools/clang
/tools/gcmole/bootstrap
/tools/gcmole/gcmole-tools

2
deps/v8/.style.yapf vendored
View file

@ -1,2 +1,2 @@
[style]
based_on_style = chromium
based_on_style = yapf

7
deps/v8/AUTHORS vendored
View file

@ -44,6 +44,7 @@ CodeWeavers, Inc. <*@codeweavers.com>
Alibaba, Inc. <*@alibaba-inc.com>
SiFive, Inc. <*@sifive.com>
Aapo Alasuutari <aapo.alasuutari@gmail.com>
Aaron Bieber <deftly@gmail.com>
Aaron O'Mullan <aaron.omullan@gmail.com>
Abdulla Kamar <abdulla.kamar@gmail.com>
@ -81,6 +82,7 @@ Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
Chao Wang <chao.w@rioslab.org>
Charles Kerr <charles@charleskerr.com>
Cheng Zhao <zcbenz@gmail.com>
Chengzhong Wu <legendecas@gmail.com>
Choongwoo Han <cwhan.tunz@gmail.com>
Chris Nardi <hichris123@gmail.com>
@ -153,6 +155,8 @@ Jiawen Geng <technicalcute@gmail.com>
Jiaxun Yang <jiaxun.yang@flygoat.com>
Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
Johan Levin <johan13@gmail.com>
John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Jonathan Liu <net147@gmail.com>
Juan Arboleda <soyjuanarbol@gmail.com>
Julien Brianceau <jbriance@cisco.com>
@ -256,6 +260,7 @@ Ujjwal Sharma <usharma1998@gmail.com>
Vadim Gorbachev <bmsdave@gmail.com>
Varun Varada <varuncvarada@gmail.com>
Victor Costan <costan@gmail.com>
Victor Polevoy <fx@thefx.co>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
@ -286,3 +291,5 @@ Zheng Liu <i6122f@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
柳荣一 <admin@web-tinker.com>
Yang Xiang <xiangyangemail@gmail.com>
Kotaro Ohsugi <dec4m4rk@gmail.com>
Jing Peiyang <jingpeiyang@eswincomputing.com>

365
deps/v8/BUILD.bazel vendored
View file

@ -16,7 +16,8 @@ load(
"v8_library",
"v8_mksnapshot",
"v8_string",
"v8_torque",
"v8_torque_definitions",
"v8_torque_initializers",
)
load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression")
@ -42,6 +43,7 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression
# v8_enable_conservative_stack_scanning
# v8_enable_concurrent_marking
# v8_enable_ignition_dispatch_counting
# v8_enable_builtins_optimization
# v8_enable_builtins_profiling
# v8_enable_builtins_profiling_verbose
# v8_builtins_profiling_log_file
@ -68,6 +70,7 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression
# v8_enable_sandbox
# cppgc_enable_caged_heap
# cppgc_enable_check_assignments_in_prefinalizers
# cppgc_enable_slim_write_barrier
# cppgc_enable_object_names
# cppgc_enable_pointer_compression
# cppgc_enable_verify_heap
@ -132,6 +135,8 @@ v8_flag(name = "v8_enable_snapshot_code_comments")
v8_flag(name = "v8_enable_snapshot_native_code_counters")
v8_flag(name = "v8_enable_static_roots")
v8_flag(name = "v8_enable_trace_maps")
v8_flag(name = "v8_enable_v8_checks")
@ -144,11 +149,21 @@ v8_flag(name = "v8_enable_verify_predictable")
v8_flag(name = "v8_enable_test_features")
v8_flag(
name = "v8_enable_turbofan",
default = True,
)
v8_flag(
name = "v8_enable_webassembly",
default = True,
)
v8_flag(
name = "v8_jitless",
default = False,
)
v8_int(
name = "v8_typed_array_max_size_in_heap",
default = 64,
@ -316,16 +331,18 @@ v8_config(
"v8_enable_slow_dchecks": "ENABLE_SLOW_DCHECKS",
"v8_enable_runtime_call_stats": "V8_RUNTIME_CALL_STATS",
"v8_enable_snapshot_native_code_counters": "V8_SNAPSHOT_NATIVE_CODE_COUNTERS",
"v8_enable_static_roots": "V8_STATIC_ROOTS",
"v8_enable_trace_maps": "V8_TRACE_MAPS",
"v8_enable_turbofan": "V8_ENABLE_TURBOFAN",
"v8_enable_v8_checks": "V8_ENABLE_CHECKS",
"v8_enable_verify_csa": "ENABLE_VERIFY_CSA",
"v8_enable_verify_heap": "VERIFY_HEAP",
"v8_enable_verify_predictable": "VERIFY_PREDICTABLE",
"v8_enable_webassembly": "V8_ENABLE_WEBASSEMBLY",
"v8_jitless": "V8_JITLESS",
},
defines = [
"GOOGLE3",
"ENABLE_DEBUGGER_SUPPORT",
"V8_ADVANCED_BIGINT_ALGORITHMS",
"V8_CONCURRENT_MARKING",
] + select({
@ -595,6 +612,7 @@ filegroup(
"src/base/build_config.h",
"src/base/compiler-specific.h",
"src/base/container-utils.h",
"src/base/contextual.h",
"src/base/cpu.cc",
"src/base/cpu.h",
"src/base/debug/stack_trace.cc",
@ -653,6 +671,7 @@ filegroup(
"src/base/platform/mutex.cc",
"src/base/platform/mutex.h",
"src/base/platform/platform.h",
"src/base/platform/platform.cc",
"src/base/platform/semaphore.cc",
"src/base/platform/semaphore.h",
"src/base/platform/time.cc",
@ -706,7 +725,6 @@ filegroup(
"@v8//bazel/config:is_macos": [
"src/base/debug/stack_trace_posix.cc",
"src/base/platform/platform-darwin.cc",
"src/base/platform/platform-macos.cc",
],
"@v8//bazel/config:is_windows": [
"src/base/win32-headers.h",
@ -816,6 +834,8 @@ filegroup(
"src/builtins/internal-coverage.tq",
"src/builtins/internal.tq",
"src/builtins/iterator.tq",
"src/builtins/iterator-from.tq",
"src/builtins/iterator-helpers.tq",
"src/builtins/math.tq",
"src/builtins/number.tq",
"src/builtins/object-fromentries.tq",
@ -859,6 +879,7 @@ filegroup(
"src/builtins/string-html.tq",
"src/builtins/string-includes.tq",
"src/builtins/string-indexof.tq",
"src/builtins/string-iswellformed.tq",
"src/builtins/string-iterator.tq",
"src/builtins/string-match-search.tq",
"src/builtins/string-pad.tq",
@ -868,6 +889,7 @@ filegroup(
"src/builtins/string-startswith.tq",
"src/builtins/string-substr.tq",
"src/builtins/string-substring.tq",
"src/builtins/string-towellformed.tq",
"src/builtins/string-trim.tq",
"src/builtins/symbol.tq",
"src/builtins/torque-internal.tq",
@ -924,6 +946,7 @@ filegroup(
"src/objects/js-collection.tq",
"src/objects/js-function.tq",
"src/objects/js-generator.tq",
"src/objects/js-iterator-helpers.tq",
"src/objects/js-objects.tq",
"src/objects/js-promise.tq",
"src/objects/js-proxy.tq",
@ -963,6 +986,7 @@ filegroup(
"src/objects/templates.tq",
"src/objects/torque-defined-classes.tq",
"src/objects/turbofan-types.tq",
"src/objects/turboshaft-types.tq",
"test/torque/test-torque.tq",
"third_party/v8/builtins/array-sort.tq",
] + select({
@ -1008,7 +1032,6 @@ filegroup(
"src/torque/cfg.h",
"src/torque/class-debug-reader-generator.cc",
"src/torque/constants.h",
"src/torque/contextual.h",
"src/torque/cpp-builder.cc",
"src/torque/cpp-builder.h",
"src/torque/csa-generator.cc",
@ -1152,7 +1175,6 @@ filegroup(
"src/builtins/builtins-utils-inl.h",
"src/builtins/builtins-utils.h",
"src/builtins/builtins-weak-refs.cc",
"src/builtins/builtins-web-snapshots.cc",
"src/builtins/builtins.cc",
"src/builtins/builtins.h",
"src/builtins/constants-table-builder.cc",
@ -1231,8 +1253,8 @@ filegroup(
"src/codegen/tick-counter.h",
"src/codegen/tnode.cc",
"src/codegen/tnode.h",
"src/codegen/turbo-assembler.cc",
"src/codegen/turbo-assembler.h",
"src/codegen/macro-assembler-base.cc",
"src/codegen/macro-assembler-base.h",
"src/codegen/unoptimized-compilation-info.cc",
"src/codegen/unoptimized-compilation-info.h",
"src/common/assert-scope.cc",
@ -1245,11 +1267,13 @@ filegroup(
"src/common/message-template.h",
"src/common/operation.h",
"src/common/ptr-compr-inl.h",
"src/common/ptr-compr.cc",
"src/common/ptr-compr.h",
"src/compiler-dispatcher/lazy-compile-dispatcher.cc",
"src/compiler-dispatcher/lazy-compile-dispatcher.h",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.h",
"src/compiler/turbofan.h",
"src/date/date.cc",
"src/date/date.h",
"src/date/dateparser-inl.h",
@ -1429,6 +1453,8 @@ filegroup(
"src/heap/cppgc-js/cpp-marking-state-inl.h",
"src/heap/cppgc-js/cpp-snapshot.cc",
"src/heap/cppgc-js/cpp-snapshot.h",
"src/heap/cppgc-js/cross-heap-remembered-set.cc",
"src/heap/cppgc-js/cross-heap-remembered-set.h",
"src/heap/cppgc-js/unified-heap-marking-state.cc",
"src/heap/cppgc-js/unified-heap-marking-state.h",
"src/heap/cppgc-js/unified-heap-marking-state-inl.h",
@ -1436,9 +1462,8 @@ filegroup(
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc",
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
"src/heap/embedder-tracing.cc",
"src/heap/embedder-tracing.h",
"src/heap/embedder-tracing-inl.h",
"src/heap/cppgc-js/wrappable-info.h",
"src/heap/cppgc-js/wrappable-info-inl.h",
"src/heap/evacuation-verifier.cc",
"src/heap/evacuation-verifier.h",
"src/heap/evacuation-verifier-inl.h",
@ -1459,8 +1484,6 @@ filegroup(
"src/heap/gc-tracer.cc",
"src/heap/gc-tracer-inl.h",
"src/heap/gc-tracer.h",
"src/heap/global-handle-marking-visitor.cc",
"src/heap/global-handle-marking-visitor.h",
"src/heap/heap-allocator-inl.h",
"src/heap/heap-allocator.cc",
"src/heap/heap-allocator.h",
@ -1528,6 +1551,7 @@ filegroup(
"src/heap/new-spaces-inl.h",
"src/heap/new-spaces.cc",
"src/heap/new-spaces.h",
"src/heap/object-lock.h",
"src/heap/object-stats.cc",
"src/heap/object-stats.h",
"src/heap/objects-visiting-inl.h",
@ -1551,8 +1575,8 @@ filegroup(
"src/heap/remembered-set.h",
"src/heap/safepoint.cc",
"src/heap/safepoint.h",
"src/heap/scavenge-job.cc",
"src/heap/scavenge-job.h",
"src/heap/minor-gc-job.cc",
"src/heap/minor-gc-job.h",
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
@ -1561,12 +1585,12 @@ filegroup(
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
"src/heap/stress-marking-observer.cc",
"src/heap/stress-marking-observer.h",
"src/heap/stress-scavenge-observer.cc",
"src/heap/stress-scavenge-observer.h",
"src/heap/sweeper.cc",
"src/heap/sweeper.h",
"src/heap/traced-handles-marking-visitor.cc",
"src/heap/traced-handles-marking-visitor.h",
"src/heap/weak-object-worklists.cc",
"src/heap/weak-object-worklists.h",
"src/ic/call-optimization.cc",
@ -1662,6 +1686,8 @@ filegroup(
"src/numbers/conversions.cc",
"src/numbers/conversions.h",
"src/numbers/hash-seed-inl.h",
"src/numbers/integer-literal-inl.h",
"src/numbers/integer-literal.h",
"src/numbers/math-random.cc",
"src/numbers/math-random.h",
"src/objects/all-objects-inl.h",
@ -1758,6 +1784,8 @@ filegroup(
"src/objects/js-function.h",
"src/objects/js-generator-inl.h",
"src/objects/js-generator.h",
"src/objects/js-iterator-helpers-inl.h",
"src/objects/js-iterator-helpers.h",
"src/objects/js-objects-inl.h",
"src/objects/js-objects.cc",
"src/objects/js-objects.h",
@ -1777,6 +1805,7 @@ filegroup(
"src/objects/js-shadow-realm-inl.h",
"src/objects/js-shared-array.h",
"src/objects/js-shared-array-inl.h",
"src/objects/js-struct.cc",
"src/objects/js-struct.h",
"src/objects/js-struct-inl.h",
"src/objects/js-temporal-objects.h",
@ -1915,6 +1944,8 @@ filegroup(
"src/objects/transitions.h",
"src/objects/turbofan-types-inl.h",
"src/objects/turbofan-types.h",
"src/objects/turboshaft-types-inl.h",
"src/objects/turboshaft-types.h",
"src/objects/type-hints.cc",
"src/objects/type-hints.h",
"src/objects/value-serializer.cc",
@ -2031,6 +2062,7 @@ filegroup(
"src/roots/roots-inl.h",
"src/roots/roots.cc",
"src/roots/roots.h",
"src/roots/static-roots.h",
"src/runtime/runtime-array.cc",
"src/runtime/runtime-atomics.cc",
"src/runtime/runtime-bigint.cc",
@ -2190,8 +2222,6 @@ filegroup(
"src/utils/utils.h",
"src/utils/version.cc",
"src/utils/version.h",
"src/web-snapshot/web-snapshot.h",
"src/web-snapshot/web-snapshot.cc",
"src/zone/accounting-allocator.cc",
"src/zone/accounting-allocator.h",
"src/zone/compressed-zone-ptr.h",
@ -2233,10 +2263,6 @@ filegroup(
"src/codegen/ia32/macro-assembler-ia32.h",
"src/codegen/ia32/register-ia32.h",
"src/codegen/ia32/reglist-ia32.h",
"src/compiler/backend/ia32/code-generator-ia32.cc",
"src/compiler/backend/ia32/instruction-codes-ia32.h",
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
"src/compiler/backend/ia32/instruction-selector-ia32.cc",
"src/deoptimizer/ia32/deoptimizer-ia32.cc",
"src/diagnostics/ia32/disasm-ia32.cc",
"src/diagnostics/ia32/unwinder-ia32.cc",
@ -2263,12 +2289,6 @@ filegroup(
"src/codegen/x64/macro-assembler-x64.h",
"src/codegen/x64/register-x64.h",
"src/codegen/x64/reglist-x64.h",
"src/compiler/backend/x64/code-generator-x64.cc",
"src/compiler/backend/x64/instruction-codes-x64.h",
"src/compiler/backend/x64/instruction-scheduler-x64.cc",
"src/compiler/backend/x64/instruction-selector-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.h",
"src/deoptimizer/x64/deoptimizer-x64.cc",
"src/diagnostics/x64/disasm-x64.cc",
"src/diagnostics/x64/eh-frame-x64.cc",
@ -2293,12 +2313,6 @@ filegroup(
"src/codegen/arm/macro-assembler-arm.h",
"src/codegen/arm/register-arm.h",
"src/codegen/arm/reglist-arm.h",
"src/compiler/backend/arm/code-generator-arm.cc",
"src/compiler/backend/arm/instruction-codes-arm.h",
"src/compiler/backend/arm/instruction-scheduler-arm.cc",
"src/compiler/backend/arm/instruction-selector-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.h",
"src/deoptimizer/arm/deoptimizer-arm.cc",
"src/diagnostics/arm/disasm-arm.cc",
"src/diagnostics/arm/eh-frame-arm.cc",
@ -2334,12 +2348,6 @@ filegroup(
"src/codegen/arm64/reglist-arm64.h",
"src/codegen/arm64/utils-arm64.cc",
"src/codegen/arm64/utils-arm64.h",
"src/compiler/backend/arm64/code-generator-arm64.cc",
"src/compiler/backend/arm64/instruction-codes-arm64.h",
"src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
"src/compiler/backend/arm64/instruction-selector-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
"src/deoptimizer/arm64/deoptimizer-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.h",
@ -2369,12 +2377,6 @@ filegroup(
"src/codegen/s390/macro-assembler-s390.h",
"src/codegen/s390/register-s390.h",
"src/codegen/s390/reglist-s390.h",
"src/compiler/backend/s390/code-generator-s390.cc",
"src/compiler/backend/s390/instruction-codes-s390.h",
"src/compiler/backend/s390/instruction-scheduler-s390.cc",
"src/compiler/backend/s390/instruction-selector-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.h",
"src/deoptimizer/s390/deoptimizer-s390.cc",
"src/diagnostics/s390/disasm-s390.cc",
"src/diagnostics/s390/eh-frame-s390.cc",
@ -2401,10 +2403,6 @@ filegroup(
"src/codegen/riscv64/macro-assembler-riscv64.h",
"src/codegen/riscv64/register-riscv64.h",
"src/codegen/riscv64/reglist-riscv64.h",
"src/compiler/backend/riscv64/code-generator-riscv64.cc",
"src/compiler/backend/riscv64/instruction-codes-riscv64.h",
"src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
"src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
"src/deoptimizer/riscv64/deoptimizer-riscv64.cc",
"src/diagnostics/riscv64/disasm-riscv64.cc",
"src/diagnostics/riscv64/unwinder-riscv64.cc",
@ -2430,12 +2428,6 @@ filegroup(
"src/codegen/ppc/macro-assembler-ppc.h",
"src/codegen/ppc/register-ppc.h",
"src/codegen/ppc/reglist-ppc.h",
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-codes-ppc.h",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
@ -2598,6 +2590,8 @@ filegroup(
"src/wasm/wasm-subtyping.h",
"src/wasm/wasm-tier.h",
"src/wasm/wasm-value.h",
"src/wasm/well-known-imports.cc",
"src/wasm/well-known-imports.h",
],
"//conditions:default": [],
}),
@ -2607,7 +2601,6 @@ filegroup(
name = "icu/v8_base_without_compiler_files",
srcs = [
"src/builtins/builtins-intl.cc",
"src/builtins/builtins-intl-gen.cc",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
"src/objects/js-break-iterator.cc",
@ -2849,6 +2842,7 @@ filegroup(
"src/compiler/osr.h",
"src/compiler/per-isolate-compiler-cache.h",
"src/compiler/persistent-map.h",
"src/compiler/phase.h",
"src/compiler/pipeline.cc",
"src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
@ -2881,36 +2875,91 @@ filegroup(
"src/compiler/state-values-utils.cc",
"src/compiler/state-values-utils.h",
"src/compiler/store-store-elimination.cc",
"src/compiler/string-builder-optimizer.cc",
"src/compiler/string-builder-optimizer.h",
"src/compiler/store-store-elimination.h",
"src/compiler/turbofan-enabled.cc",
"src/compiler/turbofan.h",
"src/compiler/turboshaft/assembler.cc",
"src/compiler/turboshaft/assembler.h",
"src/compiler/turboshaft/assert-types-reducer.h",
"src/compiler/turboshaft/branch-elimination-reducer.h",
"src/compiler/turboshaft/build-graph-phase.cc",
"src/compiler/turboshaft/build-graph-phase.h",
"src/compiler/turboshaft/builtin-call-descriptors.h",
"src/compiler/turboshaft/dead-code-elimination-phase.cc",
"src/compiler/turboshaft/dead-code-elimination-phase.h",
"src/compiler/turboshaft/dead-code-elimination-reducer.h",
"src/compiler/turboshaft/decompression-optimization.cc",
"src/compiler/turboshaft/decompression-optimization.h",
"src/compiler/turboshaft/decompression-optimization-phase.cc",
"src/compiler/turboshaft/decompression-optimization-phase.h",
"src/compiler/turboshaft/define-assembler-macros.inc",
"src/compiler/turboshaft/deopt-data.h",
"src/compiler/turboshaft/fast-hash.h",
"src/compiler/turboshaft/graph-builder.cc",
"src/compiler/turboshaft/graph-builder.h",
"src/compiler/turboshaft/graph.cc",
"src/compiler/turboshaft/graph.h",
"src/compiler/turboshaft/index.h",
"src/compiler/turboshaft/graph-visualizer.cc",
"src/compiler/turboshaft/graph-visualizer.h",
"src/compiler/turboshaft/late-escape-analysis-reducer.h",
"src/compiler/turboshaft/late-escape-analysis-reducer.cc",
"src/compiler/turboshaft/late-optimization-phase.cc",
"src/compiler/turboshaft/late-optimization-phase.h",
"src/compiler/turboshaft/layered-hash-map.h",
"src/compiler/turboshaft/machine-lowering-phase.cc",
"src/compiler/turboshaft/machine-lowering-phase.h",
"src/compiler/turboshaft/machine-lowering-reducer.h",
"src/compiler/turboshaft/machine-optimization-reducer.h",
"src/compiler/turboshaft/memory-optimization-reducer.cc",
"src/compiler/turboshaft/memory-optimization-reducer.h",
"src/compiler/turboshaft/operations.cc",
"src/compiler/turboshaft/operations.h",
"src/compiler/turboshaft/operation-matching.h",
"src/compiler/turboshaft/optimization-phase.cc",
"src/compiler/turboshaft/optimization-phase.h",
"src/compiler/turboshaft/optimize-phase.cc",
"src/compiler/turboshaft/optimize-phase.h",
"src/compiler/turboshaft/phase.cc",
"src/compiler/turboshaft/phase.h",
"src/compiler/turboshaft/recreate-schedule.cc",
"src/compiler/turboshaft/recreate-schedule.h",
"src/compiler/turboshaft/recreate-schedule-phase.cc",
"src/compiler/turboshaft/recreate-schedule-phase.h",
"src/compiler/turboshaft/reducer-traits.h",
"src/compiler/turboshaft/representations.cc",
"src/compiler/turboshaft/representations.h",
"src/compiler/turboshaft/runtime-call-descriptors.h",
"src/compiler/turboshaft/select-lowering-reducer.h",
"src/compiler/turboshaft/sidetable.h",
"src/compiler/turboshaft/simplify-tf-loops.cc",
"src/compiler/turboshaft/simplify-tf-loops.h",
"src/compiler/turboshaft/snapshot-table.h",
"src/compiler/turboshaft/tag-untag-lowering-phase.cc",
"src/compiler/turboshaft/tag-untag-lowering-phase.h",
"src/compiler/turboshaft/tag-untag-lowering-reducer.h",
"src/compiler/turboshaft/tracing.h",
"src/compiler/turboshaft/type-inference-analysis.h",
"src/compiler/turboshaft/type-inference-reducer.h",
"src/compiler/turboshaft/typer.cc",
"src/compiler/turboshaft/typer.h",
"src/compiler/turboshaft/type-assertions-phase.cc",
"src/compiler/turboshaft/type-assertions-phase.h",
"src/compiler/turboshaft/type-parser.cc",
"src/compiler/turboshaft/type-parser.h",
"src/compiler/turboshaft/typed-optimizations-phase.cc",
"src/compiler/turboshaft/typed-optimizations-phase.h",
"src/compiler/turboshaft/typed-optimizations-reducer.h",
"src/compiler/turboshaft/types.cc",
"src/compiler/turboshaft/types.h",
"src/compiler/turboshaft/undef-assembler-macros.inc",
"src/compiler/turboshaft/uniform-reducer-adapter.h",
"src/compiler/turboshaft/utils.cc",
"src/compiler/turboshaft/utils.h",
"src/compiler/turboshaft/value-numbering-reducer.h",
"src/compiler/turboshaft/variable-reducer.h",
"src/compiler/type-cache.cc",
"src/compiler/type-cache.h",
"src/compiler/type-narrowing-reducer.cc",
@ -2930,14 +2979,71 @@ filegroup(
"src/compiler/zone-stats.cc",
"src/compiler/zone-stats.h",
] + select({
"@v8//bazel/config:v8_target_ia32": [
"src/compiler/backend/ia32/code-generator-ia32.cc",
"src/compiler/backend/ia32/instruction-codes-ia32.h",
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
"src/compiler/backend/ia32/instruction-selector-ia32.cc",
],
"@v8//bazel/config:v8_target_x64": [
"src/compiler/backend/x64/code-generator-x64.cc",
"src/compiler/backend/x64/instruction-codes-x64.h",
"src/compiler/backend/x64/instruction-scheduler-x64.cc",
"src/compiler/backend/x64/instruction-selector-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.h",
],
"@v8//bazel/config:v8_target_arm": [
"src/compiler/backend/arm/code-generator-arm.cc",
"src/compiler/backend/arm/instruction-codes-arm.h",
"src/compiler/backend/arm/instruction-scheduler-arm.cc",
"src/compiler/backend/arm/instruction-selector-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.h",
],
"@v8//bazel/config:v8_target_arm64": [
"src/compiler/backend/arm64/code-generator-arm64.cc",
"src/compiler/backend/arm64/instruction-codes-arm64.h",
"src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
"src/compiler/backend/arm64/instruction-selector-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
],
"@v8//bazel/config:v8_target_s390x": [
"src/compiler/backend/s390/code-generator-s390.cc",
"src/compiler/backend/s390/instruction-codes-s390.h",
"src/compiler/backend/s390/instruction-scheduler-s390.cc",
"src/compiler/backend/s390/instruction-selector-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.h",
],
"@v8//bazel/config:v8_target_riscv64": [
"src/compiler/backend/riscv64/code-generator-riscv64.cc",
"src/compiler/backend/riscv64/instruction-codes-riscv64.h",
"src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
"src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
],
"@v8//bazel/config:v8_target_ppc64le": [
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-codes-ppc.h",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
],
}) + select({
":is_v8_enable_webassembly": [
"src/compiler/int64-lowering.cc",
"src/compiler/int64-lowering.h",
"src/compiler/wasm-call-descriptors.cc",
"src/compiler/wasm-call-descriptors.h",
"src/compiler/wasm-compiler-definitions.h",
"src/compiler/wasm-compiler.cc",
"src/compiler/wasm-compiler.h",
"src/compiler/wasm-escape-analysis.cc",
"src/compiler/wasm-escape-analysis.h",
"src/compiler/wasm-load-elimination.cc",
"src/compiler/wasm-load-elimination.h",
"src/compiler/wasm-loop-peeling.cc",
"src/compiler/wasm-loop-peeling.h",
"src/compiler/wasm-gc-lowering.cc",
@ -2948,6 +3054,8 @@ filegroup(
"src/compiler/wasm-graph-assembler.h",
"src/compiler/wasm-inlining.cc",
"src/compiler/wasm-inlining.h",
"src/compiler/wasm-inlining-into-js.cc",
"src/compiler/wasm-inlining-into-js.h",
"src/compiler/wasm-typer.cc",
"src/compiler/wasm-typer.h",
],
@ -2956,7 +3064,7 @@ filegroup(
)
filegroup(
name = "v8_initializers_files",
name = "noicu/v8_initializers_files",
srcs = [
"src/builtins/builtins-array-gen.cc",
"src/builtins/builtins-array-gen.h",
@ -2990,6 +3098,7 @@ filegroup(
"src/builtins/builtins-microtask-queue-gen.cc",
"src/builtins/builtins-number-gen.cc",
"src/builtins/builtins-object-gen.cc",
"src/builtins/builtins-object-gen.h",
"src/builtins/builtins-promise-gen.cc",
"src/builtins/builtins-promise-gen.h",
"src/builtins/builtins-proxy-gen.cc",
@ -3046,6 +3155,14 @@ filegroup(
}),
)
filegroup(
name = "icu/v8_initializers_files",
srcs = [
"src/builtins/builtins-intl-gen.cc",
":noicu/v8_initializers_files",
],
)
filegroup(
name = "cppgc_base_files",
srcs = [
@ -3160,16 +3277,16 @@ filegroup(
# Note these cannot be v8_target_is_* selects because these contain
# inline assembly that runs inside the executable. Since these are
# linked directly into mksnapshot, they must use the actual target cpu.
"@v8//bazel/config:is_inline_asm_ia32": ["src/heap/base/asm/ia32/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_x64": ["src/heap/base/asm/x64/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_arm": ["src/heap/base/asm/arm/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_arm64": ["src/heap/base/asm/arm64/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_s390x": ["src/heap/base/asm/s390/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_riscv64": ["src/heap/base/asm/riscv64/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_ppc64le": ["src/heap/base/asm/ppc/save_registers_asm.cc"],
"@v8//bazel/config:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/save_registers_masm.asm"],
"@v8//bazel/config:is_msvc_asm_x64": ["src/heap/base/asm/x64/save_registers_masm.asm"],
"@v8//bazel/config:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/save_registers_masm.S"],
"@v8//bazel/config:is_inline_asm_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_arm": ["src/heap/base/asm/arm/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_s390x": ["src/heap/base/asm/s390/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_riscv64": ["src/heap/base/asm/riscv64/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_ppc64le": ["src/heap/base/asm/ppc/push_registers_asm.cc"],
"@v8//bazel/config:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/push_registers_masm.asm"],
"@v8//bazel/config:is_msvc_asm_x64": ["src/heap/base/asm/x64/push_registers_masm.asm"],
"@v8//bazel/config:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/push_registers_masm.S"],
}),
)
@ -3215,6 +3332,8 @@ filegroup(
"src/snapshot/embedded/platform-embedded-file-writer-mac.h",
"src/snapshot/embedded/platform-embedded-file-writer-win.cc",
"src/snapshot/embedded/platform-embedded-file-writer-win.h",
"src/snapshot/static-roots-gen.cc",
"src/snapshot/static-roots-gen.h",
"src/snapshot/mksnapshot.cc",
"src/snapshot/snapshot-empty.cc",
],
@ -3223,6 +3342,8 @@ filegroup(
filegroup(
name = "v8_inspector_files",
srcs = [
"src/inspector/crc32.cc",
"src/inspector/crc32.h",
"src/inspector/custom-preview.cc",
"src/inspector/custom-preview.h",
"src/inspector/injected-script.cc",
@ -3249,6 +3370,8 @@ filegroup(
"src/inspector/v8-debugger.h",
"src/inspector/v8-debugger-agent-impl.cc",
"src/inspector/v8-debugger-agent-impl.h",
"src/inspector/v8-debugger-barrier.cc",
"src/inspector/v8-debugger-barrier.h",
"src/inspector/v8-debugger-id.cc",
"src/inspector/v8-debugger-id.h",
"src/inspector/v8-debugger-script.cc",
@ -3374,8 +3497,8 @@ filegroup(
# TODO(victorgomes): Add support to tools/debug_helper,
# which needs class-debug-readers and debug-macros.
v8_torque(
name = "generated_torque_files",
v8_torque_definitions(
name = "generated_torque_definitions",
args = select({
":is_v8_annotate_torque_ir": ["-annotate-ir"],
"//conditions:default": [],
@ -3391,12 +3514,8 @@ v8_torque(
"class-forward-declarations.h",
"class-verifiers.cc",
"class-verifiers.h",
"csa-types.h",
# "debug-macros.cc",
# "debug-macros.h",
"enum-verifiers.cc",
"exported-macros-assembler.cc",
"exported-macros-assembler.h",
"factory.cc",
"factory.inc",
"instance-types.h",
@ -3409,8 +3528,28 @@ v8_torque(
noicu_srcs = [":noicu/torque_files"],
)
v8_torque_initializers(
name = "generated_torque_initializers",
args = select({
":is_v8_annotate_torque_ir": ["-annotate-ir"],
"//conditions:default": [],
}) + select({
"@v8//bazel/config:v8_target_is_32_bits": ["-m32"],
"//conditions:default": [],
}),
extras = [
"csa-types.h",
"enum-verifiers.cc",
"exported-macros-assembler.cc",
"exported-macros-assembler.h",
],
icu_srcs = [":icu/torque_files"],
noicu_srcs = [":noicu/torque_files"],
)
py_binary(
name = "code_generator",
python_version = "PY3",
srcs = [
"third_party/inspector_protocol/code_generator.py",
"third_party/inspector_protocol/pdl.py",
@ -3476,8 +3615,6 @@ filegroup(
name = "v8_common_libshared_files",
srcs = [
":torque_runtime_support_files",
":v8_compiler_files",
":v8_initializers_files",
":v8_libplatform_files",
":v8_libsampler_files",
":v8_shared_internal_headers",
@ -3547,14 +3684,16 @@ v8_mksnapshot(
# NOTE: This allow headers to be accessed without the icu/noicu prefixes.
cc_library(
name = "icu/generated_torque_headers",
hdrs = [":icu/generated_torque_files"],
name = "icu/generated_torque_definitions_headers",
hdrs = [":icu/generated_torque_definitions"],
copts = ["-Wno-implicit-fallthrough"],
strip_include_prefix = "icu",
)
cc_library(
name = "noicu/generated_torque_headers",
hdrs = [":noicu/generated_torque_files"],
name = "noicu/generated_torque_definitions_headers",
hdrs = [":noicu/generated_torque_definitions"],
copts = ["-Wno-implicit-fallthrough"],
strip_include_prefix = "noicu",
)
@ -3564,6 +3703,7 @@ v8_library(
":v8_libbase_files",
":v8_shared_internal_headers",
],
copts = ["-Wno-implicit-fallthrough"],
)
cc_library(
@ -3572,6 +3712,7 @@ cc_library(
"src/torque/kythe-data.h",
"src/torque/torque-compiler.h",
],
copts = ["-Wno-implicit-fallthrough"],
include_prefix = "third_party/v8",
includes = ["."],
)
@ -3581,7 +3722,7 @@ cc_library(
srcs = [
":torque_base_files",
],
copts = select({
copts = ["-Wno-implicit-fallthrough"] + select({
"@v8//bazel/config:is_posix": ["-fexceptions"],
"//conditions:default": [],
}),
@ -3597,21 +3738,27 @@ v8_library(
srcs = [
":v8_base_without_compiler_files",
":v8_common_libshared_files",
],
] + select({
":is_v8_enable_turbofan": [
":v8_compiler_files",
],
"//conditions:default": [],
}),
copts = ["-Wno-implicit-fallthrough"],
icu_deps = [
":icu/generated_torque_headers",
":icu/generated_torque_definitions_headers",
"//external:icu",
],
icu_srcs = [
":generated_regexp_special_case",
":icu/generated_torque_files",
":icu/generated_torque_definitions",
":icu/v8_base_without_compiler_files",
],
noicu_deps = [
":noicu/generated_torque_headers",
":noicu/generated_torque_definitions_headers",
],
noicu_srcs = [
":noicu/generated_torque_files",
":noicu/generated_torque_definitions",
],
deps = [
":v8_libbase",
@ -3621,8 +3768,17 @@ v8_library(
v8_library(
name = "v8",
srcs = [":v8_inspector_files"],
srcs = [
":v8_inspector_files",
] + select({
":is_not_v8_enable_turbofan": [
# With Turbofan disabled, we only include the stubbed-out API.
"src/compiler/turbofan-disabled.cc",
],
"//conditions:default": [],
}),
hdrs = [":public_header_files"],
copts = ["-Wno-implicit-fallthrough"],
icu_deps = [":icu/v8_libshared"],
icu_srcs = [":icu/snapshot_files"],
noicu_deps = [":noicu/v8_libshared"],
@ -3635,6 +3791,7 @@ v8_library(
name = "wee8",
srcs = [":wee8_files"],
hdrs = [":public_wasm_c_api_header_files"],
copts = ["-Wno-implicit-fallthrough"],
strip_include_prefix = "third_party",
visibility = ["//visibility:public"],
deps = [":noicu/v8"],
@ -3664,6 +3821,7 @@ v8_binary(
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
],
copts = ["-Wno-implicit-fallthrough"],
deps = ["v8_libbase"],
)
@ -3675,6 +3833,7 @@ v8_binary(
":v8_libbase_files",
":v8_shared_internal_headers",
],
copts = ["-Wno-implicit-fallthrough"],
defines = [
"V8_INTL_SUPPORT",
"ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC",
@ -3693,10 +3852,11 @@ v8_binary(
"src/torque/torque.cc",
":torque_base_files",
],
copts = select({
copts = ["-Wno-implicit-fallthrough"] + select({
"@v8//bazel/config:is_posix": ["-fexceptions"],
"//conditions:default": [],
}),
icu_defines = [ "V8_INTL_SUPPORT" ],
features = ["-use_header_modules"],
linkopts = select({
"@v8//bazel/config:is_android": ["-llog"],
@ -3707,26 +3867,49 @@ v8_binary(
v8_binary(
name = "mksnapshot",
srcs = [":mksnapshot_files"],
srcs = [
":mksnapshot_files",
] + select({
":is_not_v8_enable_turbofan": [
# Turbofan is needed to generate builtins.
":v8_compiler_files",
],
"//conditions:default": [],
}),
copts = ["-Wno-implicit-fallthrough"],
icu_deps = [":icu/v8_libshared"],
linkopts = select({
"@v8//bazel/config:is_android": ["-llog"],
"//conditions:default": [],
}),
icu_srcs = [
":icu/generated_torque_initializers",
":icu/v8_initializers_files",
],
noicu_deps = [":v8_libshared_noicu"],
noicu_srcs = [
":noicu/generated_torque_initializers",
":noicu/v8_initializers_files",
],
)
v8_binary(
name = "d8",
srcs = [":d8_files"],
copts = ["-Wno-implicit-fallthrough"],
icu_deps = [":icu/v8"],
noicu_deps = [":noicu/v8"],
)
# This target forces torque to be compiled without pointer compression.
v8_binary_non_pointer_compression(
name = "torque_non_pointer_compression",
binary = "torque",
name = "noicu/torque_non_pointer_compression",
binary = "noicu/torque",
)
v8_binary_non_pointer_compression(
name = "icu/torque_non_pointer_compression",
binary = "icu/torque",
)
alias(

967
deps/v8/BUILD.gn vendored

File diff suppressed because it is too large Load diff

84
deps/v8/DEPS vendored
View file

@ -30,8 +30,18 @@ vars = {
'checkout_instrumented_libraries': False,
'checkout_ittapi': False,
# Fetch the prebuilt binaries for llvm-cov and llvm-profdata. Needed to
# process the raw profiles produced by instrumented targets (built with
# the gn arg 'use_clang_coverage').
'checkout_clang_coverage_tools': False,
# Fetch clang-tidy into the same bin/ directory as our clang binary.
'checkout_clang_tidy': False,
# Fetch and build V8 builtins with PGO profiles
'checkout_v8_builtins_pgo_profiles': False,
'chromium_url': 'https://chromium.googlesource.com',
'android_url': 'https://android.googlesource.com',
'download_gcmole': False,
@ -43,22 +53,22 @@ vars = {
'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/gn/',
# reclient CIPD package version
'reclient_version': 're_client_version:0.83.0.da55f4f-gomaip',
'reclient_version': 're_client_version:0.99.0.3f95625-gomaip',
# GN CIPD package version.
'gn_version': 'git_revision:a4d67be044b42963de801001e7146f9657c7fad4',
'gn_version': 'git_revision:41fef642de70ecdcaaa26be96d56a0398f95abd4',
# ninja CIPD package version
# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
'ninja_version': 'version:2@1.8.2.chromium.3',
'ninja_version': 'version:2@1.11.1.chromium.6',
# luci-go CIPD package version.
'luci_go': 'git_revision:f8f64a8c560d2bf68a3ad1137979d17cffb36d30',
'luci_go': 'git_revision:320bf3ed60cd4d24549d0ea9ee3a94394f2665ce',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other.
'fuchsia_version': 'version:10.20221109.1.1',
'fuchsia_version': 'version:12.20230322.3.1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@ -91,18 +101,18 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_tools-lint_version
# and whatever else without interference from each other.
'android_sdk_cmdline-tools_version': 'IPzAG-uU5zVMxohpg9-7-N0tQC1TCSW1VbrBFw7Ld04C',
'android_sdk_cmdline-tools_version': '3Yn5Sn7BMObm8gsoZCF0loJMKg9_PpgU07G9DObCLdQC',
}
deps = {
'base/trace_event/common':
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556',
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '147f65333c38ddd1ebf554e89965c243c8ce50b3',
'build':
Var('chromium_url') + '/chromium/src/build.git' + '@' + '875cb19167f2e0d7b1eca89a4d5b5693421424c6',
Var('chromium_url') + '/chromium/src/build.git' + '@' + '9e9a4341dd24e68cba0f228567a6edbaff1c665b',
'buildtools':
Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '49ac7cf34ab2e59a10629a7a722cfb94348c4996',
Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '0a6c69640f1841d9109eac70a25af310d4c1d8c7',
'buildtools/clang_format/script':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7',
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'f97059df7f8b205064625cdb5f97b56668a125ef',
'buildtools/linux64': {
'packages': [
{
@ -124,11 +134,11 @@ deps = {
'condition': 'host_os == "mac"',
},
'buildtools/third_party/libc++/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '4218f3525ad438b22b0e173d963515a09d143398',
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'e44019bfac2b2d3ebe1618628884f85c8600e322',
'buildtools/third_party/libc++abi/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '1a32724f721e1c3b6c590a07fe4a954344f15e48',
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '9643f2cf13d6935a84a30b7da7de53327733e190',
'buildtools/third_party/libunwind/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a318d6a4c283a9d342d2a1e20292c1496fe12997',
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '29a6dda8c6588ba4abeafdb21be531e757983e31',
'buildtools/win': {
'packages': [
{
@ -147,14 +157,14 @@ deps = {
}
],
'dep_type': 'cipd',
'condition': '(host_os == "linux" or host_os == "mac" or host_os == "win") and host_cpu != "s390" and host_cpu != "ppc"',
'condition': '(host_os == "linux" or host_os == "mac" or host_os == "win") and host_cpu != "s390" and host_cpu != "ppc" and host_cpu != "arm64"',
},
'test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ade328d530525333751e8a3b58f02e18624da085',
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'd216cc197269fc41eb6eca14710529c3d6650535',
'third_party/android_ndk': {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d',
'condition': 'checkout_android',
@ -202,15 +212,15 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
'url': Var('chromium_url') + '/catapult.git' + '@' + 'f0b11967c94cba8f7cca91d2da20c98d4420fc25',
'url': Var('chromium_url') + '/catapult.git' + '@' + '018d397758e54d6a6d3b6ddf28a1784664d63f83',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
'url': Var('chromium_url') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8',
'url': Var('chromium_url') + '/external/colorama.git' + '@' + '3de9f013df4b470069d03d250224062e8cf15c49',
'condition': 'checkout_android',
},
'third_party/depot_tools':
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ae1a70891738fb14f64fbb884e00b87ac663aa15',
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '479e1e9055020c8d1351bf2194d0a606aeca93d5',
'third_party/fuchsia-sdk/sdk': {
'packages': [
{
@ -227,9 +237,9 @@ deps = {
'third_party/googletest/src':
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07',
'third_party/icu':
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'da07448619763d1cde255b361324242646f5b268',
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '1e49ac26ddc712b1ab702f69023cbc57e9ae6628',
'third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '7bb87a375ffc3effd17a50f690099dcfb9ee280b',
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '0f536d22dbed454b1254c7e6d7130eab28fba1fa',
'third_party/ittapi': {
# Force checkout ittapi libraries to pass v8 header includes check on
# bots that has check_v8_header_includes enabled.
@ -237,7 +247,7 @@ deps = {
'condition': "checkout_ittapi or check_v8_header_includes",
},
'third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '4633bf431193690c3491244f5a0acbe9ac776233',
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '264c07d7e64f2874434a3b8039e101ddf1b01e7e',
'third_party/jsoncpp/source':
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448',
'third_party/logdog/logdog':
@ -255,7 +265,7 @@ deps = {
'condition': 'host_cpu != "s390" and host_cpu != "ppc"'
},
'third_party/perfetto':
Var('android_url') + '/platform/external/perfetto.git' + '@' + '0eba417b2c72264fa825dc21067b9adc9b8adf70',
Var('android_url') + '/platform/external/perfetto.git' + '@' + '0d180f46481a96cbe8340734fa5cdce3bba636c8',
'third_party/protobuf':
Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + '6a59a2ad1f61d9696092f79b6d74368b4d7970a3',
'third_party/requests': {
@ -263,9 +273,9 @@ deps = {
'condition': 'checkout_android',
},
'third_party/zlib':
Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '8bbd6c3129b5146489f2321f054e855c347857f4',
Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '5edb52d4302d7aef232d585ec9ae27ef5c3c5438',
'tools/clang':
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'd3df9cc5362e0af4cda798b0612dde39783b3dc0',
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '7a7207a7f2eb0f7f5c9f304a51077a2fd504b3ed',
'tools/luci-go': {
'packages': [
{
@ -499,7 +509,7 @@ hooks = [
'--arch=x64'],
},
{
'name': 'msan_chained_origins',
'name': 'msan_chained_origins_focal',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'python3',
@ -507,11 +517,11 @@ hooks = [
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
'-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins.tgz.sha1',
'-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins-focal.tgz.sha1',
],
},
{
'name': 'msan_no_origins',
'name': 'msan_no_origins_focal',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'python3',
@ -519,7 +529,7 @@ hooks = [
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
'-s', 'third_party/instrumented_libraries/binaries/msan-no-origins.tgz.sha1',
'-s', 'third_party/instrumented_libraries/binaries/msan-no-origins-focal.tgz.sha1',
],
},
{
@ -557,6 +567,14 @@ hooks = [
'condition': 'host_os != "aix"',
'action': ['python3', 'tools/clang/scripts/update.py'],
},
{
# This is supposed to support the same set of platforms as 'clang' above.
'name': 'clang_coverage',
'pattern': '.',
'condition': 'checkout_clang_coverage_tools',
'action': ['python3', 'tools/clang/scripts/update.py',
'--package=coverage_tools'],
},
{
'name': 'clang_tidy',
'pattern': '.',
@ -606,6 +624,16 @@ hooks = [
'tools/generate-header-include-checks.py',
],
},
{
'name': 'checkout_v8_builtins_pgo_profiles',
'pattern': '.',
'condition': 'checkout_v8_builtins_pgo_profiles',
'action': [
'python3',
'tools/builtins-pgo/download_profiles.py',
'download',
],
},
{
# Clean up build dirs for crbug.com/1337238.
# After a libc++ roll and revert, .ninja_deps would get into a state

1
deps/v8/PPC_OWNERS vendored
View file

@ -2,4 +2,3 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
vasili.skurydzin@ibm.com

View file

@ -85,6 +85,7 @@ def _V8PresubmitChecks(input_api, output_api):
sys.path.append(input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools'))
from v8_presubmit import CppLintProcessor
from v8_presubmit import GCMoleProcessor
from v8_presubmit import JSLintProcessor
from v8_presubmit import TorqueLintProcessor
from v8_presubmit import SourceProcessor
@ -126,6 +127,9 @@ def _V8PresubmitChecks(input_api, output_api):
if not StatusFilesProcessor().RunOnFiles(
input_api.AffectedFiles(include_deletes=True)):
results.append(output_api.PresubmitError("Status file check failed"))
if not GCMoleProcessor().RunOnFiles(
input_api.AffectedFiles(include_deletes=False)):
results.append(output_api.PresubmitError("GCMole pattern check failed"))
results.extend(input_api.canned_checks.CheckAuthorizedAuthor(
input_api, output_api, bot_allowlist=[
'v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com'
@ -257,8 +261,9 @@ def _CheckHeadersHaveIncludeGuards(input_api, output_api):
files_to_check=(file_inclusion_pattern, ),
files_to_skip=files_to_skip)
leading_src_pattern = input_api.re.compile(r'^src/')
dash_dot_slash_pattern = input_api.re.compile(r'[-./]')
leading_src_pattern = input_api.re.compile(r'^src[\\\/]')
dash_dot_slash_pattern = input_api.re.compile(r'[-.\\\/]')
def PathToGuardMacro(path):
"""Guards should be of the form V8_PATH_TO_FILE_WITHOUT_SRC_H_."""
x = input_api.re.sub(leading_src_pattern, 'v8_', path)

1
deps/v8/S390_OWNERS vendored
View file

@ -2,4 +2,3 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
vasili.skurydzin@ibm.com

17
deps/v8/WATCHLISTS vendored
View file

@ -101,11 +101,16 @@
'|test/unittests/heap/cppgc/' \
'|test/unittests/heap/cppgc-js/',
},
'trap-handler': {
'filepath': 'src/trap-handler/',
},
'tests': {
'filepath': 'test/',
},
},
'WATCHLISTS': {
'maglev': [
'jgruber+watch@chromium.org',
'leszeks+watch@chromium.org',
'verwaest+watch@chromium.org',
'victorgomes+watch@chromium.org',
@ -122,6 +127,7 @@
],
'feature_shipping_status': [
'hablich@chromium.org',
'saelo+watch@chromium.org',
],
'heap_changes': [
'hpayer@chromium.org',
@ -168,5 +174,14 @@
'cppgc': [
'oilpan-reviews+v8@chromium.org',
],
'trap-handler': [
'ahaas@chromium.org',
'clemensb@chromium.org',
'mark@chromium.org',
'mseaborn@chromium.org',
],
'tests': [
'almuthanna+watch@chromium.org',
],
},
}

View file

@ -208,9 +208,6 @@
#include "base/time/time.h"
#include "build/build_config.h"
// Export Perfetto symbols in the same way as //base symbols.
#define PERFETTO_COMPONENT_EXPORT BASE_EXPORT
// Enable legacy trace event macros (e.g., TRACE_EVENT{0,1,2}).
#define PERFETTO_ENABLE_LEGACY_TRACE_EVENTS 1
@ -224,11 +221,6 @@
// variable a unique name based on the line number to prevent name collisions.
#define INTERNAL_TRACE_EVENT_UID(name_prefix) PERFETTO_UID(name_prefix)
// Special trace event macro to trace log messages.
// TODO(skyostil): Convert this into a regular typed trace event.
#define TRACE_LOG_MESSAGE(file, message, line) \
INTERNAL_TRACE_LOG_MESSAGE(file, message, line)
// Declare debug annotation converters for base time types, so they can be
// passed as trace event arguments.
// TODO(skyostil): Serialize timestamps using perfetto::TracedValue instead.
@ -250,7 +242,8 @@ WriteDebugAnnotation(protos::pbzero::DebugAnnotation* annotation, ::base::Time);
} // namespace perfetto
// Pull in the tracing macro definitions from Perfetto.
#include "third_party/perfetto/include/perfetto/tracing.h"
#include "third_party/perfetto/include/perfetto/tracing/track_event.h"
#include "third_party/perfetto/include/perfetto/tracing/track_event_legacy.h"
namespace perfetto {
namespace legacy {
@ -983,10 +976,6 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> {
category_group, name, id, \
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
// Special trace event macro to trace log messages.
#define TRACE_LOG_MESSAGE(file, message, line) \
INTERNAL_TRACE_LOG_MESSAGE(file, message, line)
// TRACE_EVENT_METADATA* events are information related to other
// injected events, not events in their own right.
#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \

View file

@ -22,6 +22,13 @@ config_setting(
},
)
config_setting(
name = "is_opt",
values = {
"compilation_mode": "opt",
},
)
config_setting(
name = "is_debug",
values = {
@ -29,6 +36,14 @@ config_setting(
},
)
selects.config_setting_group(
name = "is_opt_android",
match_all = [
":is_opt",
":is_android",
],
)
config_setting(
name = "platform_cpu_x64",
constraint_values = ["@platforms//cpu:x86_64"],

165
deps/v8/bazel/defs.bzl vendored
View file

@ -2,7 +2,12 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
FlagInfo = provider(fields = ["value"])
"""
This module contains helper functions to compile V8.
"""
FlagInfo = provider("The value of an option.",
fields = ["value"])
def _options_impl(ctx):
return FlagInfo(value = ctx.build_setting_value)
@ -151,6 +156,13 @@ def _default_args():
"-fno-integrated-as",
],
"//conditions:default": [],
}) + select({
"@v8//bazel/config:is_opt_android": [
"-fvisibility=hidden",
"-fvisibility-inlines-hidden",
],
"//conditions:default": [
],
}),
includes = ["include"],
linkopts = select({
@ -175,29 +187,33 @@ ENABLE_I18N_SUPPORT_DEFINES = [
"-DUNISTR_FROM_CHAR_EXPLICIT=",
]
def _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, icu_srcs, icu_deps):
return noicu_srcs != [] or noicu_deps != [] or icu_srcs != [] or icu_deps != []
def _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, noicu_defines, icu_srcs, icu_deps, icu_defines):
return noicu_srcs != [] or noicu_deps != [] or noicu_defines != [] or icu_srcs != [] or icu_deps != [] or icu_defines != []
# buildifier: disable=function-docstring
def v8_binary(
name,
srcs,
deps = [],
defines = [],
includes = [],
copts = [],
linkopts = [],
noicu_srcs = [],
noicu_deps = [],
noicu_defines = [],
icu_srcs = [],
icu_deps = [],
icu_defines = [],
**kwargs):
default = _default_args()
if _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, icu_srcs, icu_deps):
if _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, noicu_defines, icu_srcs, icu_deps, icu_defines):
native.cc_binary(
name = "noicu/" + name,
srcs = srcs + noicu_srcs,
deps = deps + noicu_deps + default.deps,
includes = includes + default.includes,
defines = defines + noicu_defines + default.defines,
includes = includes + ["noicu/"] + default.includes,
copts = copts + default.copts,
linkopts = linkopts + default.linkopts,
**kwargs
@ -206,7 +222,8 @@ def v8_binary(
name = "icu/" + name,
srcs = srcs + icu_srcs,
deps = deps + icu_deps + default.deps,
includes = includes + default.includes,
includes = includes + ["icu/"] + default.includes,
defines = defines + icu_defines + default.defines,
copts = copts + default.copts + ENABLE_I18N_SUPPORT_DEFINES,
linkopts = linkopts + default.linkopts,
**kwargs
@ -216,6 +233,7 @@ def v8_binary(
name = name,
srcs = srcs,
deps = deps + default.deps,
defines = defines + default.defines,
includes = includes + default.includes,
copts = copts + default.copts,
linkopts = linkopts + default.linkopts,
@ -232,16 +250,18 @@ def v8_library(
linkopts = [],
noicu_srcs = [],
noicu_deps = [],
noicu_defines = [],
icu_srcs = [],
icu_deps = [],
icu_defines = [],
**kwargs):
default = _default_args()
if _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, icu_srcs, icu_deps):
if _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, noicu_defines, icu_srcs, icu_deps, icu_defines):
native.cc_library(
name = name + "_noicu",
srcs = srcs + noicu_srcs,
deps = deps + noicu_deps + default.deps,
includes = includes + default.includes,
includes = includes + ["noicu/"] + default.includes,
copts = copts + default.copts,
linkopts = linkopts + default.linkopts,
alwayslink = 1,
@ -260,7 +280,7 @@ def v8_library(
name = name + "_icu",
srcs = srcs + icu_srcs,
deps = deps + icu_deps + default.deps,
includes = includes + default.includes,
includes = includes + ["icu/"] + default.includes,
copts = copts + default.copts + ENABLE_I18N_SUPPORT_DEFINES,
linkopts = linkopts + default.linkopts,
alwayslink = 1,
@ -288,7 +308,7 @@ def v8_library(
**kwargs
)
def _torque_impl(ctx):
def _torque_initializers_impl(ctx):
if ctx.workspace_name == "v8":
v8root = "."
else:
@ -309,7 +329,7 @@ def _torque_impl(ctx):
# Generate/declare output files
outs = []
for src in ctx.files.srcs:
root, period, ext = src.path.rpartition(".")
root, _period, _ext = src.path.rpartition(".")
# Strip v8root
if root[:len(v8root)] == v8root:
@ -317,22 +337,19 @@ def _torque_impl(ctx):
file = ctx.attr.prefix + "/torque-generated/" + root
outs.append(ctx.actions.declare_file(file + "-tq-csa.cc"))
outs.append(ctx.actions.declare_file(file + "-tq-csa.h"))
outs.append(ctx.actions.declare_file(file + "-tq-inl.inc"))
outs.append(ctx.actions.declare_file(file + "-tq.inc"))
outs.append(ctx.actions.declare_file(file + "-tq.cc"))
outs += [ctx.actions.declare_file(ctx.attr.prefix + "/torque-generated/" + f) for f in ctx.attr.extras]
ctx.actions.run(
outputs = outs,
inputs = ctx.files.srcs,
arguments = args,
executable = ctx.executable.tool,
mnemonic = "GenTorque",
progress_message = "Generating Torque files",
mnemonic = "GenTorqueInitializers",
progress_message = "Generating Torque initializers",
)
return [DefaultInfo(files = depset(outs))]
_v8_torque = rule(
implementation = _torque_impl,
_v8_torque_initializers = rule(
implementation = _torque_initializers_impl,
# cfg = v8_target_cpu_transition,
attrs = {
"prefix": attr.string(mandatory = True),
@ -347,31 +364,114 @@ _v8_torque = rule(
},
)
def v8_torque(name, noicu_srcs, icu_srcs, args, extras):
_v8_torque(
def v8_torque_initializers(name, noicu_srcs, icu_srcs, args, extras):
_v8_torque_initializers(
name = "noicu/" + name,
prefix = "noicu",
srcs = noicu_srcs,
args = args,
extras = extras,
tool = select({
"@v8//bazel/config:v8_target_is_32_bits": ":torque_non_pointer_compression",
"//conditions:default": ":torque",
"@v8//bazel/config:v8_target_is_32_bits": ":noicu/torque_non_pointer_compression",
"//conditions:default": ":noicu/torque",
}),
)
_v8_torque(
_v8_torque_initializers(
name = "icu/" + name,
prefix = "icu",
srcs = icu_srcs,
args = args,
extras = extras,
tool = select({
"@v8//bazel/config:v8_target_is_32_bits": ":torque_non_pointer_compression",
"//conditions:default": ":torque",
"@v8//bazel/config:v8_target_is_32_bits": ":icu/torque_non_pointer_compression",
"//conditions:default": ":icu/torque",
}),
)
def _v8_target_cpu_transition_impl(settings, attr):
def _torque_definitions_impl(ctx):
if ctx.workspace_name == "v8":
v8root = "."
else:
v8root = "external/v8"
# Arguments
args = []
args += ctx.attr.args
args.append("-o")
args.append(ctx.bin_dir.path + "/" + v8root + "/" + ctx.attr.prefix + "/torque-generated")
args.append("-strip-v8-root")
args.append("-v8-root")
args.append(v8root)
# Sources
args += [f.path for f in ctx.files.srcs]
# Generate/declare output files
outs = []
for src in ctx.files.srcs:
root, _period, _ext = src.path.rpartition(".")
# Strip v8root
if root[:len(v8root)] == v8root:
root = root[len(v8root):]
file = ctx.attr.prefix + "/torque-generated/" + root
outs.append(ctx.actions.declare_file(file + "-tq-inl.inc"))
outs.append(ctx.actions.declare_file(file + "-tq.inc"))
outs.append(ctx.actions.declare_file(file + "-tq.cc"))
outs += [ctx.actions.declare_file(ctx.attr.prefix + "/torque-generated/" + f) for f in ctx.attr.extras]
ctx.actions.run(
outputs = outs,
inputs = ctx.files.srcs,
arguments = args,
executable = ctx.executable.tool,
mnemonic = "GenTorqueDefinitions",
progress_message = "Generating Torque definitions",
)
return [DefaultInfo(files = depset(outs))]
_v8_torque_definitions = rule(
implementation = _torque_definitions_impl,
# cfg = v8_target_cpu_transition,
attrs = {
"prefix": attr.string(mandatory = True),
"srcs": attr.label_list(allow_files = True, mandatory = True),
"extras": attr.string_list(),
"tool": attr.label(
allow_files = True,
executable = True,
cfg = "exec",
),
"args": attr.string_list(),
},
)
def v8_torque_definitions(name, noicu_srcs, icu_srcs, args, extras):
_v8_torque_definitions(
name = "noicu/" + name,
prefix = "noicu",
srcs = noicu_srcs,
args = args,
extras = extras,
tool = select({
"@v8//bazel/config:v8_target_is_32_bits": ":noicu/torque_non_pointer_compression",
"//conditions:default": ":noicu/torque",
}),
)
_v8_torque_definitions(
name = "icu/" + name,
prefix = "icu",
srcs = icu_srcs,
args = args,
extras = extras,
tool = select({
"@v8//bazel/config:v8_target_is_32_bits": ":icu/torque_non_pointer_compression",
"//conditions:default": ":icu/torque",
}),
)
def _v8_target_cpu_transition_impl(settings,
attr, # @unused
):
# Check for an existing v8_target_cpu flag.
if "@v8//bazel/config:v8_target_cpu" in settings:
if settings["@v8//bazel/config:v8_target_cpu"] != "none":
@ -499,10 +599,10 @@ def build_config_content(cpu, icu):
("is_asan", "false"),
("is_cfi", "false"),
("is_clang", "true"),
("is_clang_coverage", "false"),
("is_component_build", "false"),
("is_debug", "false"),
("is_full_debug", "false"),
("is_gcov_coverage", "false"),
("is_msan", "false"),
("is_tsan", "false"),
("is_ubsan_vptr", "false"),
@ -525,7 +625,18 @@ def build_config_content(cpu, icu):
("v8_enable_single_generation", "false"),
("v8_enable_sandbox", "false"),
("v8_enable_shared_ro_heap", "false"),
("v8_disable_write_barriers", "false"),
("v8_target_cpu", cpu),
("v8_code_comments", "false"),
("v8_enable_debug_code", "false"),
("v8_enable_verify_heap", "false"),
("v8_enable_slow_dchecks", "false"),
("v8_enable_maglev", "false"),
("v8_enable_turbofan", "true"),
("v8_enable_disassembler", "false"),
("is_DEBUG_defined", "false"),
("v8_enable_gdbjit", "false"),
("v8_jitless", "false"),
])
# TODO(victorgomes): Create a rule (instead of a macro), that can

View file

@ -32,6 +32,9 @@ use_perfetto_client_library = false
# Some non-Chromium builds don't support building java targets.
enable_java_templates = false
# Enables assertions on safety checks in libc++.
enable_safe_libcxx = true
# Allows different projects to specify their own suppressions files.
asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc"
lsan_suppressions_file = "//build/sanitizers/lsan_suppressions.cc"

View file

@ -104,6 +104,10 @@ if (v8_snapshot_toolchain == "") {
# cross compile Windows arm64 with host toolchain.
v8_snapshot_toolchain = host_toolchain
}
} else if (host_cpu == "arm64" && current_cpu == "arm64" &&
host_os == "mac") {
# cross compile iOS arm64 with host_toolchain
v8_snapshot_toolchain = host_toolchain
}
}

88
deps/v8/gni/v8.gni vendored
View file

@ -2,17 +2,15 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/config/compiler/pgo/pgo.gni")
import("//build/config/gclient_args.gni")
import("//build/config/ios/config.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/v8_target_cpu.gni")
import("release_branch_toggle.gni")
import("split_static_library.gni")
declare_args() {
# Set flags for tracking code coverage. Uses gcov with gcc and sanitizer
# coverage with clang.
v8_code_coverage = false
# Includes files needed for correctness fuzzing.
v8_correctness_fuzzer = false
@ -36,6 +34,9 @@ declare_args() {
# the JS builtins sources and the start snapshot.
v8_use_external_startup_data = ""
# Includes profiles to optimize builtins.
v8_enable_builtins_optimization = ""
# Enable ECMAScript Internationalization API. Enabling this feature will
# add a dependency on the ICU library.
v8_enable_i18n_support = true
@ -63,10 +64,19 @@ declare_args() {
# Sets -DV8_LITE_MODE.
v8_enable_lite_mode = false
# Enable the Turbofan compiler.
# Sets -dV8_ENABLE_TURBOFAN.
v8_enable_turbofan = ""
# Enable the Maglev compiler.
# Sets -dV8_ENABLE_MAGLEV
v8_enable_maglev = ""
# Include support for WebAssembly. If disabled, the 'WebAssembly' global
# will not be available, and embedder APIs to generate WebAssembly modules
# will fail. Also, asm.js will not be translated to WebAssembly and will be
# executed as standard JavaScript instead.
# Sets -dV8_ENABLE_WEBASSEMBLY.
v8_enable_webassembly = ""
# Enable 256-bit long vector re-vectorization pass in WASM compilation pipeline.
@ -81,12 +91,6 @@ declare_args() {
# Scan the call stack conservatively during garbage collection.
v8_enable_conservative_stack_scanning = false
# Use the object start bitmap for inner pointer resolution.
v8_enable_inner_pointer_resolution_osb = false
# Use the marking bitmap for inner pointer resolution.
v8_enable_inner_pointer_resolution_mb = false
v8_enable_google_benchmark = false
cppgc_is_standalone = false
@ -97,6 +101,12 @@ declare_args() {
# Enable young generation in cppgc.
cppgc_enable_young_generation = false
# Enables a slim write barrier that only performs a single check in the fast
# path and delegates all further checks to a slow path call. This is fast
# in a setting with few slow-path checks, i.e., with disabled young generation
# GC.
cppgc_enable_slim_write_barrier = true
# Enable pointer compression in cppgc.
cppgc_enable_pointer_compression = false
@ -107,6 +117,20 @@ declare_args() {
# Enable advanced BigInt algorithms, costing about 10-30 KB binary size
# depending on platform. Disabled on Android to save binary size.
v8_advanced_bigint_algorithms = !is_android
# TODO: macros for determining endian type are clang specific.
v8_use_libm_trig_functions = is_clang
# iOS device does not support executable code pages. Not we
# use target_os == "ios" here because it isn't equivalent
# to is_ios (is_ios is based on host_os).
target_is_ios_device = target_os == "ios" && target_environment == "device"
# Location of icu.
v8_icu_path = "//third_party/icu"
# Location of zlib.
v8_zlib_path = "//third_party/zlib"
}
if (v8_use_external_startup_data == "") {
@ -125,13 +149,36 @@ if (build_with_chromium && use_perfetto_client_library) {
v8_use_perfetto = true
}
# Includes profiles to optimize builtins if
# * it is a Chromium build, and
# * Chromium builds with optimization.
# If no profiles are downloaded during gclient runhooks, optimization fails
# silently.
if (v8_enable_builtins_optimization == "") {
v8_enable_builtins_optimization = build_with_chromium && chrome_pgo_phase == 2
}
# TODO(jgruber): Move v8_jitless from BUILD.gn here as these
# all depend on each other and really should be derived from
# v8_jitless.
# WebAssembly is enabled by default, except in lite mode.
if (v8_enable_webassembly == "") {
v8_enable_webassembly = !v8_enable_lite_mode
# iOS (non-simulator) does not have executable pages for 3rd party
# applications yet so disable webassembly.
v8_enable_webassembly = !v8_enable_lite_mode && !target_is_ios_device
}
assert(!(v8_enable_webassembly && v8_enable_lite_mode),
"Webassembly is not available in lite mode.")
# Turbofan is enabled by default, except in lite mode.
if (v8_enable_turbofan == "") {
# iOS (non-simulator) does not have executable pages for 3rd party
# applications yet so disable turbofan.
v8_enable_turbofan = !v8_enable_lite_mode && !target_is_ios_device
}
assert(v8_enable_turbofan || !v8_enable_webassembly,
"Webassembly is not available when Turbofan is disabled.")
# Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute
# paths for all configs in templates as they are shared in different
# subdirectories.
@ -172,13 +219,6 @@ if (!is_debug) {
]
}
if (v8_code_coverage && !is_clang) {
v8_add_configs += [
v8_path_prefix + ":v8_gcov_coverage_cflags",
v8_path_prefix + ":v8_gcov_coverage_ldflags",
]
}
if (v8_symbol_level != symbol_level) {
v8_remove_configs += [ "//build/config/compiler:default_symbols" ]
if (v8_symbol_level == 0) {
@ -260,23 +300,13 @@ template("v8_executable") {
# For enabling ASLR.
ldflags = [ "-pie" ]
}
if (defined(testonly) && testonly && v8_code_coverage) {
# Only add code coverage cflags for non-test files for performance
# reasons.
if (is_clang) {
configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
configs +=
[ "//build/config/sanitizers:default_sanitizer_flags_but_coverage" ]
} else {
configs -= [ v8_path_prefix + ":v8_gcov_coverage_cflags" ]
}
}
deps += [ v8_path_prefix + ":v8_dump_build_config" ]
}
}
template("v8_component") {
component(target_name) {
output_name = target_name
forward_variables_from(invoker,
"*",
[

View file

@ -148,10 +148,11 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicCrossThreadPersistent(
internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
MemberCheckingPolicy>
MemberCheckingPolicy, MemberStorageType>
member,
const SourceLocation& loc = SourceLocation::Current())
: BasicCrossThreadPersistent(member.Get(), loc) {}
@ -230,10 +231,11 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
// Assignment from member.
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicCrossThreadPersistent& operator=(
internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
MemberCheckingPolicy>
MemberCheckingPolicy, MemberStorageType>
member) {
return operator=(member.Get());
}

View file

@ -62,10 +62,10 @@ class HeapConsistency final {
* \returns whether a write barrier is needed and which barrier to invoke.
*/
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
typename CheckingPolicy, typename StorageType>
static V8_INLINE WriteBarrierType GetWriteBarrierType(
const internal::BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& value,
CheckingPolicy, StorageType>& value,
WriteBarrierParams& params) {
return internal::WriteBarrier::GetWriteBarrierType(
value.GetRawSlot(), value.GetRawStorage(), params);

View file

@ -32,7 +32,7 @@ static constexpr uint16_t kFullyConstructedBitMask = uint16_t{1};
static constexpr size_t kPageSize = size_t{1} << 17;
#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS)
#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_DARWIN)
constexpr size_t kGuardPageSize = 0;
#else
constexpr size_t kGuardPageSize = 4096;
@ -57,6 +57,9 @@ static constexpr size_t kMaxSupportedAlignment = 2 * kDefaultAlignment;
// Granularity of heap allocations.
constexpr size_t kAllocationGranularity = sizeof(void*);
// Default cacheline size.
constexpr size_t kCachelineSize = 64;
} // namespace api_constants
} // namespace internal

View file

@ -10,6 +10,7 @@
#include <type_traits>
#include "cppgc/internal/finalizer-trait.h"
#include "cppgc/internal/logging.h"
#include "cppgc/internal/name-trait.h"
#include "cppgc/trace-trait.h"
#include "v8config.h" // NOLINT(build/include_directory)
@ -20,12 +21,12 @@ namespace internal {
using GCInfoIndex = uint16_t;
struct V8_EXPORT EnsureGCInfoIndexTrait final {
// Acquires a new GC info object and returns the index. In addition, also
// updates `registered_index` atomically.
// Acquires a new GC info object and updates `registered_index` with the index
// that identifies that new info accordingly.
template <typename T>
V8_INLINE static GCInfoIndex EnsureIndex(
V8_INLINE static void EnsureIndex(
std::atomic<GCInfoIndex>& registered_index) {
return EnsureGCInfoIndexTraitDispatch<T>{}(registered_index);
EnsureGCInfoIndexTraitDispatch<T>{}(registered_index);
}
private:
@ -34,38 +35,32 @@ struct V8_EXPORT EnsureGCInfoIndexTrait final {
bool = NameTrait<T>::HasNonHiddenName()>
struct EnsureGCInfoIndexTraitDispatch;
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback,
NameCallback);
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback);
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback, NameCallback);
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback,
NameCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
NameCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback);
static void V8_PRESERVE_MOST
EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback,
FinalizationCallback, NameCallback);
static void V8_PRESERVE_MOST EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>&, TraceCallback, FinalizationCallback);
static void V8_PRESERVE_MOST EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>&, TraceCallback, NameCallback);
static void V8_PRESERVE_MOST
EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback);
static void V8_PRESERVE_MOST
EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback,
FinalizationCallback, NameCallback);
static void V8_PRESERVE_MOST EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>&, TraceCallback, FinalizationCallback);
static void V8_PRESERVE_MOST EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>&, TraceCallback, NameCallback);
static void V8_PRESERVE_MOST
EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback);
};
#define DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function) \
template <typename T> \
struct EnsureGCInfoIndexTrait::EnsureGCInfoIndexTraitDispatch< \
T, is_polymorphic, has_finalizer, has_non_hidden_name> { \
V8_INLINE GCInfoIndex \
operator()(std::atomic<GCInfoIndex>& registered_index) { \
return function; \
V8_INLINE void operator()(std::atomic<GCInfoIndex>& registered_index) { \
function; \
} \
};
@ -143,9 +138,16 @@ struct GCInfoTrait final {
static_assert(sizeof(T), "T must be fully defined");
static std::atomic<GCInfoIndex>
registered_index; // Uses zero initialization.
const GCInfoIndex index = registered_index.load(std::memory_order_acquire);
return index ? index
: EnsureGCInfoIndexTrait::EnsureIndex<T>(registered_index);
GCInfoIndex index = registered_index.load(std::memory_order_acquire);
if (V8_UNLIKELY(!index)) {
EnsureGCInfoIndexTrait::EnsureIndex<T>(registered_index);
// Slow path call uses V8_PRESERVE_MOST which does not support return
// values (also preserves RAX). Avoid out parameter by just reloading the
// value here which at this point is guaranteed to be set.
index = registered_index.load(std::memory_order_acquire);
CPPGC_DCHECK(index != 0);
}
return index;
}
};

View file

@ -17,6 +17,11 @@
namespace cppgc {
namespace internal {
enum class WriteBarrierSlotType {
kCompressed,
kUncompressed,
};
#if defined(CPPGC_POINTER_COMPRESSION)
#if defined(__clang__)
@ -30,16 +35,16 @@ namespace internal {
#define CPPGC_REQUIRE_CONSTANT_INIT
#endif // defined(__clang__)
class CageBaseGlobal final {
class V8_EXPORT CageBaseGlobal final {
public:
V8_INLINE CPPGC_CONST static uintptr_t Get() {
CPPGC_DCHECK(IsBaseConsistent());
return g_base_;
return g_base_.base;
}
V8_INLINE CPPGC_CONST static bool IsSet() {
CPPGC_DCHECK(IsBaseConsistent());
return (g_base_ & ~kLowerHalfWordMask) != 0;
return (g_base_.base & ~kLowerHalfWordMask) != 0;
}
private:
@ -47,12 +52,15 @@ class CageBaseGlobal final {
static constexpr uintptr_t kLowerHalfWordMask =
(api_constants::kCagedHeapReservationAlignment - 1);
static V8_EXPORT uintptr_t g_base_ CPPGC_REQUIRE_CONSTANT_INIT;
static union alignas(api_constants::kCachelineSize) Base {
uintptr_t base;
char cache_line[api_constants::kCachelineSize];
} g_base_ CPPGC_REQUIRE_CONSTANT_INIT;
CageBaseGlobal() = delete;
V8_INLINE static bool IsBaseConsistent() {
return kLowerHalfWordMask == (g_base_ & kLowerHalfWordMask);
return kLowerHalfWordMask == (g_base_.base & kLowerHalfWordMask);
}
friend class CageBaseGlobalUpdater;
@ -64,6 +72,8 @@ class CageBaseGlobal final {
class V8_TRIVIAL_ABI CompressedPointer final {
public:
using IntegralType = uint32_t;
static constexpr auto kWriteBarrierSlotType =
WriteBarrierSlotType::kCompressed;
V8_INLINE CompressedPointer() : value_(0u) {}
V8_INLINE explicit CompressedPointer(const void* ptr)
@ -173,6 +183,8 @@ class V8_TRIVIAL_ABI CompressedPointer final {
class V8_TRIVIAL_ABI RawPointer final {
public:
using IntegralType = uintptr_t;
static constexpr auto kWriteBarrierSlotType =
WriteBarrierSlotType::kUncompressed;
V8_INLINE RawPointer() : ptr_(nullptr) {}
V8_INLINE explicit RawPointer(const void* ptr) : ptr_(ptr) {}
@ -225,9 +237,9 @@ class V8_TRIVIAL_ABI RawPointer final {
};
#if defined(CPPGC_POINTER_COMPRESSION)
using MemberStorage = CompressedPointer;
using DefaultMemberStorage = CompressedPointer;
#else // !defined(CPPGC_POINTER_COMPRESSION)
using MemberStorage = RawPointer;
using DefaultMemberStorage = RawPointer;
#endif // !defined(CPPGC_POINTER_COMPRESSION)
} // namespace internal

View file

@ -33,21 +33,54 @@ struct DijkstraWriteBarrierPolicy {
// barrier doesn't break the tri-color invariant.
}
template <WriteBarrierSlotType SlotType>
V8_INLINE static void AssigningBarrier(const void* slot, const void* value) {
#ifdef CPPGC_SLIM_WRITE_BARRIER
if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
#else // !CPPGC_SLIM_WRITE_BARRIER
WriteBarrier::Params params;
const WriteBarrier::Type type =
WriteBarrier::GetWriteBarrierType(slot, value, params);
WriteBarrier(type, params, slot, value);
#endif // !CPPGC_SLIM_WRITE_BARRIER
}
V8_INLINE static void AssigningBarrier(const void* slot,
MemberStorage storage) {
template <WriteBarrierSlotType SlotType>
V8_INLINE static void AssigningBarrier(const void* slot, RawPointer storage) {
static_assert(
SlotType == WriteBarrierSlotType::kUncompressed,
"Assigning storages of Member and UncompressedMember is not supported");
#ifdef CPPGC_SLIM_WRITE_BARRIER
if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
#else // !CPPGC_SLIM_WRITE_BARRIER
WriteBarrier::Params params;
const WriteBarrier::Type type =
WriteBarrier::GetWriteBarrierType(slot, storage, params);
WriteBarrier(type, params, slot, storage.Load());
#endif // !CPPGC_SLIM_WRITE_BARRIER
}
#if defined(CPPGC_POINTER_COMPRESSION)
template <WriteBarrierSlotType SlotType>
V8_INLINE static void AssigningBarrier(const void* slot,
CompressedPointer storage) {
static_assert(
SlotType == WriteBarrierSlotType::kCompressed,
"Assigning storages of Member and UncompressedMember is not supported");
#ifdef CPPGC_SLIM_WRITE_BARRIER
if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
#else // !CPPGC_SLIM_WRITE_BARRIER
WriteBarrier::Params params;
const WriteBarrier::Type type =
WriteBarrier::GetWriteBarrierType(slot, storage, params);
WriteBarrier(type, params, slot, storage.Load());
#endif // !CPPGC_SLIM_WRITE_BARRIER
}
#endif // defined(CPPGC_POINTER_COMPRESSION)
private:
V8_INLINE static void WriteBarrier(WriteBarrier::Type type,
const WriteBarrier::Params& params,
@ -68,7 +101,9 @@ struct DijkstraWriteBarrierPolicy {
struct NoWriteBarrierPolicy {
V8_INLINE static void InitializingBarrier(const void*, const void*) {}
template <WriteBarrierSlotType>
V8_INLINE static void AssigningBarrier(const void*, const void*) {}
template <WriteBarrierSlotType, typename MemberStorage>
V8_INLINE static void AssigningBarrier(const void*, MemberStorage) {}
};
@ -197,7 +232,8 @@ template <typename T, typename WeaknessPolicy,
typename CheckingPolicy = DefaultPersistentCheckingPolicy>
class BasicPersistent;
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy = DefaultMemberCheckingPolicy>
typename CheckingPolicy = DefaultMemberCheckingPolicy,
typename StorageType = DefaultMemberStorage>
class BasicMember;
} // namespace internal

View file

@ -70,6 +70,7 @@ class V8_EXPORT WriteBarrier final {
static V8_INLINE Type GetWriteBarrierType(const void* slot, const void* value,
Params& params);
// Returns the required write barrier for a given `slot` and `value`.
template <typename MemberStorage>
static V8_INLINE Type GetWriteBarrierType(const void* slot, MemberStorage,
Params& params);
// Returns the required write barrier for a given `slot`.
@ -79,6 +80,15 @@ class V8_EXPORT WriteBarrier final {
// Returns the required write barrier for a given `value`.
static V8_INLINE Type GetWriteBarrierType(const void* value, Params& params);
#ifdef CPPGC_SLIM_WRITE_BARRIER
// A write barrier that combines `GenerationalBarrier()` and
// `DijkstraMarkingBarrier()`. We only pass a single parameter here to clobber
// as few registers as possible.
template <WriteBarrierSlotType>
static V8_NOINLINE void V8_PRESERVE_MOST
CombinedWriteBarrierSlow(const void* slot);
#endif // CPPGC_SLIM_WRITE_BARRIER
static V8_INLINE void DijkstraMarkingBarrier(const Params& params,
const void* object);
static V8_INLINE void DijkstraMarkingBarrierRange(
@ -163,7 +173,8 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
return ValueModeDispatch<value_mode>::Get(slot, value, params, callback);
}
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback,
typename MemberStorage>
static V8_INLINE WriteBarrier::Type Get(const void* slot, MemberStorage value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
@ -207,7 +218,7 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
template <>
struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
WriteBarrier::ValueMode::kValuePresent> {
template <typename HeapHandleCallback>
template <typename HeapHandleCallback, typename MemberStorage>
static V8_INLINE WriteBarrier::Type Get(const void* slot,
MemberStorage storage,
WriteBarrier::Params& params,
@ -305,11 +316,9 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final {
}
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot, MemberStorage value,
static V8_INLINE WriteBarrier::Type Get(const void* slot, RawPointer value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
// `MemberStorage` will always be `RawPointer` for non-caged heap builds.
// Just convert to `void*` in this case.
return ValueModeDispatch<value_mode>::Get(slot, value.Load(), params,
callback);
}
@ -383,6 +392,7 @@ WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
}
// static
template <typename MemberStorage>
WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
const void* slot, MemberStorage value, WriteBarrier::Params& params) {
return WriteBarrierTypePolicy::Get<ValueMode::kValuePresent>(slot, value,

View file

@ -11,7 +11,10 @@
namespace cppgc {
// Use if the object is only stack allocated.
// Use CPPGC_STACK_ALLOCATED if the object is only stack allocated.
// Add the CPPGC_STACK_ALLOCATED_IGNORE annotation on a case-by-case basis when
// enforcement of CPPGC_STACK_ALLOCATED should be suppressed.
#if defined(__clang__)
#define CPPGC_STACK_ALLOCATED() \
public: \
using IsStackAllocatedTypeMarker CPPGC_UNUSED = int; \
@ -20,6 +23,12 @@ namespace cppgc {
void* operator new(size_t) = delete; \
void* operator new(size_t, void*) = delete; \
static_assert(true, "Force semicolon.")
#define CPPGC_STACK_ALLOCATED_IGNORE(bug_or_reason) \
__attribute__((annotate("stack_allocated_ignore")))
#else // !defined(__clang__)
#define CPPGC_STACK_ALLOCATED() static_assert(true, "Force semicolon.")
#define CPPGC_STACK_ALLOCATED_IGNORE(bug_or_reason)
#endif // !defined(__clang__)
} // namespace cppgc

View file

@ -28,13 +28,11 @@ namespace internal {
// MemberBase always refers to the object as const object and defers to
// BasicMember on casting to the right type as needed.
template <typename StorageType>
class V8_TRIVIAL_ABI MemberBase {
public:
#if defined(CPPGC_POINTER_COMPRESSION)
using RawStorage = CompressedPointer;
#else // !defined(CPPGC_POINTER_COMPRESSION)
using RawStorage = RawPointer;
#endif // !defined(CPPGC_POINTER_COMPRESSION)
using RawStorage = StorageType;
protected:
struct AtomicInitializerTag {};
@ -75,16 +73,19 @@ class V8_TRIVIAL_ABI MemberBase {
// The basic class from which all Member classes are 'generated'.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
typename CheckingPolicy, typename StorageType>
class V8_TRIVIAL_ABI BasicMember final : private MemberBase<StorageType>,
private CheckingPolicy {
using Base = MemberBase<StorageType>;
public:
using PointeeType = T;
using RawStorage = typename Base::RawStorage;
V8_INLINE constexpr BasicMember() = default;
V8_INLINE constexpr BasicMember(std::nullptr_t) {} // NOLINT
V8_INLINE BasicMember(SentinelPointer s) : MemberBase(s) {} // NOLINT
V8_INLINE BasicMember(T* raw) : MemberBase(raw) { // NOLINT
V8_INLINE constexpr BasicMember(std::nullptr_t) {} // NOLINT
V8_INLINE BasicMember(SentinelPointer s) : Base(s) {} // NOLINT
V8_INLINE BasicMember(T* raw) : Base(raw) { // NOLINT
InitializingWriteBarrier(raw);
this->CheckPointer(Get());
}
@ -94,13 +95,13 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
// Atomic ctor. Using the AtomicInitializerTag forces BasicMember to
// initialize using atomic assignments. This is required for preventing
// data races with concurrent marking.
using AtomicInitializerTag = MemberBase::AtomicInitializerTag;
using AtomicInitializerTag = typename Base::AtomicInitializerTag;
V8_INLINE BasicMember(std::nullptr_t, AtomicInitializerTag atomic)
: MemberBase(nullptr, atomic) {}
: Base(nullptr, atomic) {}
V8_INLINE BasicMember(SentinelPointer s, AtomicInitializerTag atomic)
: MemberBase(s, atomic) {}
: Base(s, atomic) {}
V8_INLINE BasicMember(T* raw, AtomicInitializerTag atomic)
: MemberBase(raw, atomic) {
: Base(raw, atomic) {
InitializingWriteBarrier(raw);
this->CheckPointer(Get());
}
@ -119,7 +120,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
std::enable_if_t<internal::IsDecayedSameV<T, U>>* = nullptr>
V8_INLINE BasicMember( // NOLINT
const BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>& other)
OtherCheckingPolicy, StorageType>& other)
: BasicMember(other.GetRawStorage()) {}
template <typename U, typename OtherBarrierPolicy, typename OtherWeaknessTag,
@ -127,7 +128,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
std::enable_if_t<internal::IsStrictlyBaseOfV<T, U>>* = nullptr>
V8_INLINE BasicMember( // NOLINT
const BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>& other)
OtherCheckingPolicy, StorageType>& other)
: BasicMember(other.Get()) {}
// Move ctor.
@ -142,8 +143,9 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U, typename OtherBarrierPolicy, typename OtherWeaknessTag,
typename OtherCheckingPolicy,
std::enable_if_t<internal::IsDecayedSameV<T, U>>* = nullptr>
V8_INLINE BasicMember(BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>&& other) noexcept
V8_INLINE BasicMember(
BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy, OtherCheckingPolicy,
StorageType>&& other) noexcept
: BasicMember(other.GetRawStorage()) {
other.Clear();
}
@ -151,8 +153,9 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U, typename OtherBarrierPolicy, typename OtherWeaknessTag,
typename OtherCheckingPolicy,
std::enable_if_t<internal::IsStrictlyBaseOfV<T, U>>* = nullptr>
V8_INLINE BasicMember(BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>&& other) noexcept
V8_INLINE BasicMember(
BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy, OtherCheckingPolicy,
StorageType>&& other) noexcept
: BasicMember(other.Get()) {
other.Clear();
}
@ -179,7 +182,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
typename OtherCheckingPolicy>
V8_INLINE BasicMember& operator=(
const BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>& other) {
OtherCheckingPolicy, StorageType>& other) {
if constexpr (internal::IsDecayedSameV<T, U>) {
return operator=(other.GetRawStorage());
} else {
@ -201,8 +204,8 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U, typename OtherWeaknessTag, typename OtherBarrierPolicy,
typename OtherCheckingPolicy>
V8_INLINE BasicMember& operator=(
BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>&& other) noexcept {
BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy, OtherCheckingPolicy,
StorageType>&& other) noexcept {
if constexpr (internal::IsDecayedSameV<T, U>) {
operator=(other.GetRawStorage());
} else {
@ -226,7 +229,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
}
V8_INLINE BasicMember& operator=(T* other) {
SetRawAtomic(other);
Base::SetRawAtomic(other);
AssigningWriteBarrier(other);
this->CheckPointer(Get());
return *this;
@ -237,20 +240,20 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
return *this;
}
V8_INLINE BasicMember& operator=(SentinelPointer s) {
SetRawAtomic(s);
Base::SetRawAtomic(s);
return *this;
}
template <typename OtherWeaknessTag, typename OtherBarrierPolicy,
typename OtherCheckingPolicy>
V8_INLINE void Swap(BasicMember<T, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>& other) {
OtherCheckingPolicy, StorageType>& other) {
auto tmp = GetRawStorage();
*this = other;
other = tmp;
}
V8_INLINE explicit operator bool() const { return !IsCleared(); }
V8_INLINE explicit operator bool() const { return !Base::IsCleared(); }
V8_INLINE operator T*() const { return Get(); }
V8_INLINE T* operator->() const { return Get(); }
V8_INLINE T& operator*() const { return *Get(); }
@ -264,10 +267,12 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
// The const_cast below removes the constness from MemberBase storage. The
// following static_cast re-adds any constness if specified through the
// user-visible template parameter T.
return static_cast<T*>(const_cast<void*>(MemberBase::GetRaw()));
return static_cast<T*>(const_cast<void*>(Base::GetRaw()));
}
V8_INLINE void Clear() { SetRawStorageAtomic(RawStorage{}); }
V8_INLINE void Clear() {
Base::SetRawStorageAtomic(RawStorage{});
}
V8_INLINE T* Release() {
T* result = Get();
@ -276,41 +281,44 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
}
V8_INLINE const T** GetSlotForTesting() const {
return reinterpret_cast<const T**>(GetRawSlot());
return reinterpret_cast<const T**>(Base::GetRawSlot());
}
V8_INLINE RawStorage GetRawStorage() const {
return MemberBase::GetRawStorage();
return Base::GetRawStorage();
}
private:
V8_INLINE explicit BasicMember(RawStorage raw) : MemberBase(raw) {
V8_INLINE explicit BasicMember(RawStorage raw) : Base(raw) {
InitializingWriteBarrier(Get());
this->CheckPointer(Get());
}
V8_INLINE BasicMember& operator=(RawStorage other) {
SetRawStorageAtomic(other);
Base::SetRawStorageAtomic(other);
AssigningWriteBarrier();
this->CheckPointer(Get());
return *this;
}
V8_INLINE const T* GetRawAtomic() const {
return static_cast<const T*>(MemberBase::GetRawAtomic());
return static_cast<const T*>(Base::GetRawAtomic());
}
V8_INLINE void InitializingWriteBarrier(T* value) const {
WriteBarrierPolicy::InitializingBarrier(GetRawSlot(), value);
WriteBarrierPolicy::InitializingBarrier(Base::GetRawSlot(), value);
}
V8_INLINE void AssigningWriteBarrier(T* value) const {
WriteBarrierPolicy::AssigningBarrier(GetRawSlot(), value);
WriteBarrierPolicy::template AssigningBarrier<
StorageType::kWriteBarrierSlotType>(Base::GetRawSlot(), value);
}
V8_INLINE void AssigningWriteBarrier() const {
WriteBarrierPolicy::AssigningBarrier(GetRawSlot(), GetRawStorage());
WriteBarrierPolicy::template AssigningBarrier<
StorageType::kWriteBarrierSlotType>(Base::GetRawSlot(),
Base::GetRawStorage());
}
V8_INLINE void ClearFromGC() const { MemberBase::ClearFromGC(); }
V8_INLINE void ClearFromGC() const { Base::ClearFromGC(); }
V8_INLINE T* GetFromGC() const { return Get(); }
@ -319,19 +327,20 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U>
friend struct cppgc::TraceTrait;
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1>
typename CheckingPolicy1, typename StorageType1>
friend class BasicMember;
};
// Member equality operators.
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator==(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
if constexpr (internal::IsDecayedSameV<T1, T2>) {
// Check compressed pointers if types are the same.
return member1.GetRawStorage() == member2.GetRawStorage();
@ -345,31 +354,32 @@ V8_INLINE bool operator==(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator!=(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
return !(member1 == member2);
}
// Equality with raw pointers.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy, typename U>
V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
U* raw) {
typename CheckingPolicy, typename StorageType, typename U>
V8_INLINE bool operator==(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
U* raw) {
// Never allow comparison with erased pointers.
static_assert(!internal::IsDecayedSameV<void, U>);
if constexpr (internal::IsDecayedSameV<T, U>) {
// Check compressed pointers if types are the same.
return member.GetRawStorage() == MemberBase::RawStorage(raw);
return member.GetRawStorage() == StorageType(raw);
} else if constexpr (internal::IsStrictlyBaseOfV<T, U>) {
// Cast the raw pointer to T, which may adjust the pointer.
return member.GetRawStorage() ==
MemberBase::RawStorage(static_cast<T*>(raw));
return member.GetRawStorage() == StorageType(static_cast<T*>(raw));
} else {
// Otherwise, decompressed the member.
return member.Get() == raw;
@ -377,104 +387,112 @@ V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy, typename U>
V8_INLINE bool operator!=(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
U* raw) {
typename CheckingPolicy, typename StorageType, typename U>
V8_INLINE bool operator!=(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
U* raw) {
return !(member == raw);
}
template <typename T, typename U, typename WeaknessTag,
typename WriteBarrierPolicy, typename CheckingPolicy>
V8_INLINE bool operator==(T* raw,
const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename WriteBarrierPolicy, typename CheckingPolicy,
typename StorageType>
V8_INLINE bool operator==(
T* raw, const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return member == raw;
}
template <typename T, typename U, typename WeaknessTag,
typename WriteBarrierPolicy, typename CheckingPolicy>
V8_INLINE bool operator!=(T* raw,
const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename WriteBarrierPolicy, typename CheckingPolicy,
typename StorageType>
V8_INLINE bool operator!=(
T* raw, const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return !(raw == member);
}
// Equality with sentinel.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
SentinelPointer) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator==(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
SentinelPointer) {
return member.GetRawStorage().IsSentinel();
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator!=(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
SentinelPointer s) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator!=(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
SentinelPointer s) {
return !(member == s);
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator==(SentinelPointer s,
const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator==(
SentinelPointer s, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return member == s;
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator!=(SentinelPointer s,
const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator!=(
SentinelPointer s, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return !(s == member);
}
// Equality with nullptr.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
std::nullptr_t) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator==(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
std::nullptr_t) {
return !static_cast<bool>(member);
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator!=(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
std::nullptr_t n) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator!=(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
std::nullptr_t n) {
return !(member == n);
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator==(std::nullptr_t n,
const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator==(
std::nullptr_t n, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return member == n;
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator!=(std::nullptr_t n,
const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator!=(
std::nullptr_t n, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return !(n == member);
}
// Relational operators.
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator<(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
@ -483,12 +501,13 @@ V8_INLINE bool operator<(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator<=(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
@ -497,12 +516,13 @@ V8_INLINE bool operator<=(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator>(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
@ -511,21 +531,23 @@ V8_INLINE bool operator>(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator>=(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
return member1.GetRawStorage() >= member2.GetRawStorage();
}
template <typename T, typename WriteBarrierPolicy, typename CheckingPolicy>
struct IsWeak<
internal::BasicMember<T, WeakMemberTag, WriteBarrierPolicy, CheckingPolicy>>
template <typename T, typename WriteBarrierPolicy, typename CheckingPolicy,
typename StorageType>
struct IsWeak<internal::BasicMember<T, WeakMemberTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>>
: std::true_type {};
} // namespace internal
@ -536,8 +558,9 @@ struct IsWeak<
* trace method.
*/
template <typename T>
using Member = internal::BasicMember<T, internal::StrongMemberTag,
internal::DijkstraWriteBarrierPolicy>;
using Member = internal::BasicMember<
T, internal::StrongMemberTag, internal::DijkstraWriteBarrierPolicy,
internal::DefaultMemberCheckingPolicy, internal::DefaultMemberStorage>;
/**
* WeakMember is similar to Member in that it is used to point to other garbage
@ -548,8 +571,9 @@ using Member = internal::BasicMember<T, internal::StrongMemberTag,
* will automatically be set to null.
*/
template <typename T>
using WeakMember = internal::BasicMember<T, internal::WeakMemberTag,
internal::DijkstraWriteBarrierPolicy>;
using WeakMember = internal::BasicMember<
T, internal::WeakMemberTag, internal::DijkstraWriteBarrierPolicy,
internal::DefaultMemberCheckingPolicy, internal::DefaultMemberStorage>;
/**
* UntracedMember is a pointer to an on-heap object that is not traced for some
@ -558,8 +582,22 @@ using WeakMember = internal::BasicMember<T, internal::WeakMemberTag,
* must be kept alive through other means.
*/
template <typename T>
using UntracedMember = internal::BasicMember<T, internal::UntracedMemberTag,
internal::NoWriteBarrierPolicy>;
using UntracedMember = internal::BasicMember<
T, internal::UntracedMemberTag, internal::NoWriteBarrierPolicy,
internal::DefaultMemberCheckingPolicy, internal::DefaultMemberStorage>;
namespace subtle {
/**
* UncompressedMember. Use with care in hot paths that would otherwise cause
* many decompression cycles.
*/
template <typename T>
using UncompressedMember = internal::BasicMember<
T, internal::StrongMemberTag, internal::DijkstraWriteBarrierPolicy,
internal::DefaultMemberCheckingPolicy, internal::RawPointer>;
} // namespace subtle
} // namespace cppgc

View file

@ -114,11 +114,12 @@ class BasicPersistent final : public PersistentBase,
// Constructor from member.
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicPersistent(
const internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
MemberCheckingPolicy>& member,
const SourceLocation& loc = SourceLocation::Current())
BasicPersistent(const internal::BasicMember<
U, MemberBarrierPolicy, MemberWeaknessTag,
MemberCheckingPolicy, MemberStorageType>& member,
const SourceLocation& loc = SourceLocation::Current())
: BasicPersistent(member.Get(), loc) {}
~BasicPersistent() { Clear(); }
@ -154,10 +155,12 @@ class BasicPersistent final : public PersistentBase,
// Assignment from member.
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicPersistent& operator=(
const internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
MemberCheckingPolicy>& member) {
MemberCheckingPolicy, MemberStorageType>&
member) {
return operator=(member.Get());
}
@ -286,36 +289,39 @@ bool operator!=(const BasicPersistent<T1, WeaknessPolicy1, LocationPolicy1,
template <typename T1, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy,
typename T2, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy>
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType>
bool operator==(
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p,
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
MemberCheckingPolicy>& m) {
MemberCheckingPolicy, MemberStorageType>& m) {
return p.Get() == m.Get();
}
template <typename T1, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy,
typename T2, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy>
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType>
bool operator!=(
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p,
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
MemberCheckingPolicy>& m) {
MemberCheckingPolicy, MemberStorageType>& m) {
return !(p == m);
}
template <typename T1, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename T2, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy>
typename MemberStorageType, typename T2,
typename PersistentWeaknessPolicy, typename PersistentLocationPolicy,
typename PersistentCheckingPolicy>
bool operator==(
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
MemberCheckingPolicy>& m,
MemberCheckingPolicy, MemberStorageType>& m,
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p) {
@ -324,11 +330,12 @@ bool operator==(
template <typename T1, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename T2, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy>
typename MemberStorageType, typename T2,
typename PersistentWeaknessPolicy, typename PersistentLocationPolicy,
typename PersistentCheckingPolicy>
bool operator!=(
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
MemberCheckingPolicy>& m,
MemberCheckingPolicy, MemberStorageType>& m,
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p) {

View file

@ -16,7 +16,7 @@ class Visitor;
namespace internal {
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
typename CheckingPolicy, typename StorageType>
class BasicMember;
struct DijkstraWriteBarrierPolicy;
struct NoWriteBarrierPolicy;
@ -126,9 +126,10 @@ template <typename BasicMemberCandidate, typename WeaknessTag,
typename WriteBarrierPolicy>
struct IsSubclassOfBasicMemberTemplate {
private:
template <typename T, typename CheckingPolicy>
template <typename T, typename CheckingPolicy, typename StorageType>
static std::true_type SubclassCheck(
BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy>*);
BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>*);
static std::false_type SubclassCheck(...);
public:

View file

@ -99,6 +99,20 @@ class V8_EXPORT Visitor {
&HandleWeak<WeakMember<T>>, &weak_member);
}
#if defined(CPPGC_POINTER_COMPRESSION)
/**
* Trace method for UncompressedMember.
*
* \param member UncompressedMember reference retaining an object.
*/
template <typename T>
void Trace(const subtle::UncompressedMember<T>& member) {
const T* value = member.GetRawAtomic();
CPPGC_DCHECK(value != kSentinelPointer);
TraceImpl(value);
}
#endif // defined(CPPGC_POINTER_COMPRESSION)
/**
* Trace method for inlined objects that are not allocated themselves but
* otherwise follow managed heap layout and have a Trace() method.
@ -229,7 +243,8 @@ class V8_EXPORT Visitor {
}
/**
* Trace method for retaining containers weakly.
* Trace method for retaining containers weakly. Note that weak containers
* should emit write barriers.
*
* \param object reference to the container.
* \param callback to be invoked.

View file

@ -511,6 +511,7 @@ domain Debugger
CompileError
BlockedByActiveGenerator
BlockedByActiveFunction
BlockedByTopLevelEsModuleChange
# Exception details if any. Only present when `status` is `CompileError`.
optional Runtime.ExceptionDetails exceptionDetails
@ -1402,6 +1403,13 @@ domain Runtime
optional string objectGroup
# Whether to throw an exception if side effect cannot be ruled out during evaluation.
experimental optional boolean throwOnSideEffect
# An alternative way to specify the execution context to call function on.
# Compared to contextId that may be reused across processes, this is guaranteed to be
# system-unique, so it can be used to prevent accidental function call
# in context different than intended (e.g. as a result of navigation across process
# boundaries).
# This is mutually exclusive with `executionContextId`.
experimental optional string uniqueContextId
# Whether the result should contain `webDriverValue`, serialized according to
# https://w3c.github.io/webdriver-bidi. This is mutually exclusive with `returnByValue`, but
# resulting `objectId` is still provided.
@ -1734,7 +1742,9 @@ domain Runtime
event executionContextDestroyed
parameters
# Id of the destroyed context
ExecutionContextId executionContextId
deprecated ExecutionContextId executionContextId
# Unique Id of the destroyed context
experimental string executionContextUniqueId
# Issued when all executionContexts were cleared in browser
event executionContextsCleared

View file

@ -282,12 +282,12 @@ class V8_PLATFORM_EXPORT TracingController
const char* name, uint64_t handle) override;
static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag);
#endif // !defined(V8_USE_PERFETTO)
void AddTraceStateObserver(
v8::TracingController::TraceStateObserver* observer) override;
void RemoveTraceStateObserver(
v8::TracingController::TraceStateObserver* observer) override;
#endif // !defined(V8_USE_PERFETTO)
void StartTracing(TraceConfig* trace_config);
void StopTracing();
@ -307,7 +307,6 @@ class V8_PLATFORM_EXPORT TracingController
std::unique_ptr<base::Mutex> mutex_;
std::unique_ptr<TraceConfig> trace_config_;
std::atomic_bool recording_{false};
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
#if defined(V8_USE_PERFETTO)
std::ostream* output_stream_ = nullptr;
@ -316,6 +315,7 @@ class V8_PLATFORM_EXPORT TracingController
TraceEventListener* listener_for_testing_ = nullptr;
std::unique_ptr<perfetto::TracingSession> tracing_session_;
#else // !defined(V8_USE_PERFETTO)
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
std::unique_ptr<TraceBuffer> trace_buffer_;
#endif // !defined(V8_USE_PERFETTO)

View file

@ -53,12 +53,28 @@ class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase {
*/
size_t ByteLength() const;
/**
* The maximum length (in bytes) that this backing store may grow to.
*
* If this backing store was created for a resizable ArrayBuffer or a growable
* SharedArrayBuffer, it is >= ByteLength(). Otherwise it is ==
* ByteLength().
*/
size_t MaxByteLength() const;
/**
* Indicates whether the backing store was created for an ArrayBuffer or
* a SharedArrayBuffer.
*/
bool IsShared() const;
/**
* Indicates whether the backing store was created for a resizable ArrayBuffer
* or a growable SharedArrayBuffer, and thus may be resized by user JavaScript
* code.
*/
bool IsResizableByUserJavaScript() const;
/**
* Prevent implicit instantiation of operator delete with size_t argument.
* The size_t argument would be incorrect because ptr points to the
@ -189,6 +205,11 @@ class V8_EXPORT ArrayBuffer : public Object {
*/
size_t ByteLength() const;
/**
* Maximum length in bytes.
*/
size_t MaxByteLength() const;
/**
* Create a new ArrayBuffer. Allocate |byte_length| bytes.
* Allocated memory will be owned by a created ArrayBuffer and
@ -235,6 +256,21 @@ class V8_EXPORT ArrayBuffer : public Object {
void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
void* deleter_data);
/**
* Returns a new resizable standalone BackingStore that is allocated using the
* array buffer allocator of the isolate. The result can be later passed to
* ArrayBuffer::New.
*
* |byte_length| must be <= |max_byte_length|.
*
* This function is usable without an isolate. Unlike |NewBackingStore| calls
* with an isolate, GCs cannot be triggered, and there are no
* retries. Allocation failure will cause the function to crash with an
* out-of-memory error.
*/
static std::unique_ptr<BackingStore> NewResizableBackingStore(
size_t byte_length, size_t max_byte_length);
/**
* Returns true if this ArrayBuffer may be detached.
*/
@ -392,6 +428,11 @@ class V8_EXPORT SharedArrayBuffer : public Object {
*/
size_t ByteLength() const;
/**
* Maximum length in bytes.
*/
size_t MaxByteLength() const;
/**
* Create a new SharedArrayBuffer. Allocate |byte_length| bytes.
* Allocated memory will be owned by a created SharedArrayBuffer and

View file

@ -7,6 +7,7 @@
#include <stddef.h>
#include <functional>
#include <string>
#include "cppgc/common.h"
@ -328,6 +329,10 @@ using WasmSimdEnabledCallback = bool (*)(Local<Context> context);
// --- Callback for checking if WebAssembly exceptions are enabled ---
using WasmExceptionsEnabledCallback = bool (*)(Local<Context> context);
// --- Callback for checking if WebAssembly GC is enabled ---
// If the callback returns true, it will also enable Wasm stringrefs.
using WasmGCEnabledCallback = bool (*)(Local<Context> context);
// --- Callback for checking if the SharedArrayBuffer constructor is enabled ---
using SharedArrayBufferConstructorEnabledCallback =
bool (*)(Local<Context> context);
@ -368,6 +373,13 @@ using HostImportModuleDynamicallyCallback = MaybeLocal<Promise> (*)(
Local<Value> resource_name, Local<String> specifier,
Local<FixedArray> import_assertions);
/**
* Callback for requesting a compile hint for a function from the embedder. The
* first parameter is the position of the function in source code and the second
* parameter is embedder data to be passed back.
*/
using CompileHintCallback = bool (*)(int, void*);
/**
* HostInitializeImportMetaObjectCallback is called the first time import.meta
* is accessed for a module. Subsequent access will reuse the same value.

View file

@ -7,8 +7,11 @@
#include <stdint.h>
#include <vector>
#include "v8-data.h" // NOLINT(build/include_directory)
#include "v8-local-handle.h" // NOLINT(build/include_directory)
#include "v8-maybe.h" // NOLINT(build/include_directory)
#include "v8-snapshot.h" // NOLINT(build/include_directory)
#include "v8config.h" // NOLINT(build/include_directory)
@ -163,6 +166,42 @@ class V8_EXPORT Context : public Data {
*/
void Exit();
/**
* Delegate to help with Deep freezing embedder-specific objects (such as
* JSApiObjects) that can not be frozen natively.
*/
class DeepFreezeDelegate {
public:
/**
* Performs embedder-specific operations to freeze the provided embedder
* object. The provided object *will* be frozen by DeepFreeze after this
* function returns, so only embedder-specific objects need to be frozen.
* This function *may not* create new JS objects or perform JS allocations.
* Any v8 objects reachable from the provided embedder object that should
* also be considered for freezing should be added to the children_out
* parameter. Returns true if the operation completed successfully.
*/
virtual bool FreezeEmbedderObjectAndGetChildren(
Local<Object> obj, std::vector<Local<Object>>& children_out) = 0;
};
/**
* Attempts to recursively freeze all objects reachable from this context.
* Some objects (generators, iterators, non-const closures) can not be frozen
* and will cause this method to throw an error. An optional delegate can be
* provided to help freeze embedder-specific objects.
*
* Freezing occurs in two steps:
* 1. "Marking" where we iterate through all objects reachable by this
* context, accumulating a list of objects that need to be frozen and
* looking for objects that can't be frozen. This step is separated because
* it is more efficient when we can assume there is no garbage collection.
* 2. "Freezing" where we go through the list of objects and freezing them.
* This effectively requires copying them so it may trigger garbage
* collection.
*/
Maybe<void> DeepFreeze(DeepFreezeDelegate* delegate = nullptr);
/** Returns the isolate associated with a current context. */
Isolate* GetIsolate();
@ -365,13 +404,18 @@ Local<Value> Context::GetEmbedderData(int index) {
#ifdef V8_COMPRESS_POINTERS
// We read the full pointer value and then decompress it in order to avoid
// dealing with potential endiannes issues.
value =
I::DecompressTaggedAnyField(embedder_data, static_cast<uint32_t>(value));
value = I::DecompressTaggedField(embedder_data, static_cast<uint32_t>(value));
#endif
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
return Local<Value>(reinterpret_cast<Value*>(value));
#else
internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
*reinterpret_cast<A*>(this));
A* result = HandleScope::CreateHandle(isolate, value);
return Local<Value>(reinterpret_cast<Value*>(result));
#endif
#else
return SlowGetEmbedderData(index);
#endif
@ -381,7 +425,7 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) {
#if !defined(V8_ENABLE_CHECKS)
using A = internal::Address;
using I = internal::Internals;
A ctx = *reinterpret_cast<const A*>(this);
A ctx = internal::ValueHelper::ValueAsAddress(this);
A embedder_data =
I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
int value_offset = I::kEmbedderDataArrayHeaderSize +

View file

@ -77,6 +77,12 @@ struct WrapperDescriptor final {
};
struct V8_EXPORT CppHeapCreateParams {
CppHeapCreateParams(
std::vector<std::unique_ptr<cppgc::CustomSpaceBase>> custom_spaces,
WrapperDescriptor wrapper_descriptor)
: custom_spaces(std::move(custom_spaces)),
wrapper_descriptor(wrapper_descriptor) {}
CppHeapCreateParams(const CppHeapCreateParams&) = delete;
CppHeapCreateParams& operator=(const CppHeapCreateParams&) = delete;

View file

@ -5,27 +5,14 @@
#ifndef INCLUDE_V8_EMBEDDER_HEAP_H_
#define INCLUDE_V8_EMBEDDER_HEAP_H_
#include <stddef.h>
#include <stdint.h>
#include <utility>
#include <vector>
#include "cppgc/common.h"
#include "v8-local-handle.h" // NOLINT(build/include_directory)
#include "v8-traced-handle.h" // NOLINT(build/include_directory)
#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
class Data;
class Isolate;
class Value;
namespace internal {
class LocalEmbedderHeapTracer;
} // namespace internal
/**
* Handler for embedder roots on non-unified heap garbage collections.
*/
@ -62,162 +49,6 @@ class V8_EXPORT EmbedderRootsHandler {
virtual void ResetRoot(const v8::TracedReference<v8::Value>& handle) = 0;
};
/**
* Interface for tracing through the embedder heap. During a V8 garbage
* collection, V8 collects hidden fields of all potential wrappers, and at the
* end of its marking phase iterates the collection and asks the embedder to
* trace through its heap and use reporter to report each JavaScript object
* reachable from any of the given wrappers.
*/
class V8_EXPORT
// GCC doesn't like combining __attribute__(()) with [[deprecated]].
#ifdef __clang__
V8_DEPRECATED("Use CppHeap when working with v8::TracedReference.")
#endif // __clang__
EmbedderHeapTracer {
public:
using EmbedderStackState = cppgc::EmbedderStackState;
enum TraceFlags : uint64_t {
kNoFlags = 0,
kReduceMemory = 1 << 0,
kForced = 1 << 2,
};
/**
* Interface for iterating through |TracedReference| handles.
*/
class V8_EXPORT TracedGlobalHandleVisitor {
public:
virtual ~TracedGlobalHandleVisitor() = default;
virtual void VisitTracedReference(const TracedReference<Value>& handle) {}
};
/**
* Summary of a garbage collection cycle. See |TraceEpilogue| on how the
* summary is reported.
*/
struct TraceSummary {
/**
* Time spent managing the retained memory in milliseconds. This can e.g.
* include the time tracing through objects in the embedder.
*/
double time = 0.0;
/**
* Memory retained by the embedder through the |EmbedderHeapTracer|
* mechanism in bytes.
*/
size_t allocated_size = 0;
};
virtual ~EmbedderHeapTracer() = default;
/**
* Iterates all |TracedReference| handles created for the |v8::Isolate| the
* tracer is attached to.
*/
void IterateTracedGlobalHandles(TracedGlobalHandleVisitor* visitor);
/**
* Called by the embedder to set the start of the stack which is e.g. used by
* V8 to determine whether handles are used from stack or heap.
*/
void SetStackStart(void* stack_start);
/**
* Called by v8 to register internal fields of found wrappers.
*
* The embedder is expected to store them somewhere and trace reachable
* wrappers from them when called through |AdvanceTracing|.
*/
virtual void RegisterV8References(
const std::vector<std::pair<void*, void*>>& embedder_fields) = 0;
void RegisterEmbedderReference(const BasicTracedReference<v8::Data>& ref);
/**
* Called at the beginning of a GC cycle.
*/
virtual void TracePrologue(TraceFlags flags) {}
/**
* Called to advance tracing in the embedder.
*
* The embedder is expected to trace its heap starting from wrappers reported
* by RegisterV8References method, and report back all reachable wrappers.
* Furthermore, the embedder is expected to stop tracing by the given
* deadline. A deadline of infinity means that tracing should be finished.
*
* Returns |true| if tracing is done, and false otherwise.
*/
virtual bool AdvanceTracing(double deadline_in_ms) = 0;
/*
* Returns true if there no more tracing work to be done (see AdvanceTracing)
* and false otherwise.
*/
virtual bool IsTracingDone() = 0;
/**
* Called at the end of a GC cycle.
*
* Note that allocation is *not* allowed within |TraceEpilogue|. Can be
* overriden to fill a |TraceSummary| that is used by V8 to schedule future
* garbage collections.
*/
virtual void TraceEpilogue(TraceSummary* trace_summary) {}
/**
* Called upon entering the final marking pause. No more incremental marking
* steps will follow this call.
*/
virtual void EnterFinalPause(EmbedderStackState stack_state) = 0;
/*
* Called by the embedder to request immediate finalization of the currently
* running tracing phase that has been started with TracePrologue and not
* yet finished with TraceEpilogue.
*
* Will be a noop when currently not in tracing.
*
* This is an experimental feature.
*/
void FinalizeTracing();
/**
* See documentation on EmbedderRootsHandler.
*/
virtual bool IsRootForNonTracingGC(
const v8::TracedReference<v8::Value>& handle);
/**
* See documentation on EmbedderRootsHandler.
*/
virtual void ResetHandleInNonTracingGC(
const v8::TracedReference<v8::Value>& handle);
/*
* Called by the embedder to signal newly allocated or freed memory. Not bound
* to tracing phases. Embedders should trade off when increments are reported
* as V8 may consult global heuristics on whether to trigger garbage
* collection on this change.
*/
void IncreaseAllocatedSize(size_t bytes);
void DecreaseAllocatedSize(size_t bytes);
/*
* Returns the v8::Isolate this tracer is attached too and |nullptr| if it
* is not attached to any v8::Isolate.
*/
v8::Isolate* isolate() const { return v8_isolate_; }
protected:
v8::Isolate* v8_isolate_ = nullptr;
friend class internal::LocalEmbedderHeapTracer;
};
} // namespace v8
#endif // INCLUDE_V8_EMBEDDER_HEAP_H_

View file

@ -247,6 +247,7 @@ class CTypeInfo {
kUint64,
kFloat32,
kFloat64,
kPointer,
kV8Value,
kSeqOneByteString,
kApiObject, // This will be deprecated once all users have
@ -435,6 +436,7 @@ struct AnyCType {
uint64_t uint64_value;
float float_value;
double double_value;
void* pointer_value;
Local<Object> object_value;
Local<Array> sequence_value;
const FastApiTypedArray<uint8_t>* uint8_ta_value;
@ -620,6 +622,7 @@ class CFunctionInfoImpl : public CFunctionInfo {
kReturnType == CTypeInfo::Type::kUint32 ||
kReturnType == CTypeInfo::Type::kFloat32 ||
kReturnType == CTypeInfo::Type::kFloat64 ||
kReturnType == CTypeInfo::Type::kPointer ||
kReturnType == CTypeInfo::Type::kAny,
"64-bit int, string and api object values are not currently "
"supported return types.");
@ -658,13 +661,14 @@ struct CTypeInfoTraits {};
#define PRIMITIVE_C_TYPES(V) \
V(bool, kBool) \
V(uint8_t, kUint8) \
V(int32_t, kInt32) \
V(uint32_t, kUint32) \
V(int64_t, kInt64) \
V(uint64_t, kUint64) \
V(float, kFloat32) \
V(double, kFloat64) \
V(uint8_t, kUint8)
V(void*, kPointer)
// Same as above, but includes deprecated types for compatibility.
#define ALL_C_TYPES(V) \
@ -698,13 +702,13 @@ PRIMITIVE_C_TYPES(DEFINE_TYPE_INFO_TRAITS)
};
#define TYPED_ARRAY_C_TYPES(V) \
V(uint8_t, kUint8) \
V(int32_t, kInt32) \
V(uint32_t, kUint32) \
V(int64_t, kInt64) \
V(uint64_t, kUint64) \
V(float, kFloat32) \
V(double, kFloat64) \
V(uint8_t, kUint8)
V(double, kFloat64)
TYPED_ARRAY_C_TYPES(SPECIALIZE_GET_TYPE_INFO_HELPER_FOR_TA)

View file

@ -21,6 +21,7 @@ class Value;
namespace internal {
class FunctionCallbackArguments;
class PropertyCallbackArguments;
class Builtins;
} // namespace internal
namespace debug {
@ -74,6 +75,11 @@ class ReturnValue {
V8_INLINE void SetInternal(internal::Address value) { *value_ = value; }
V8_INLINE internal::Address GetDefaultValue();
V8_INLINE explicit ReturnValue(internal::Address* slot);
// See FunctionCallbackInfo.
static constexpr int kIsolateValueIndex = -2;
static constexpr int kDefaultValueValueIndex = -1;
internal::Address* value_;
};
@ -116,19 +122,35 @@ class FunctionCallbackInfo {
V8_INLINE Isolate* GetIsolate() const;
/** The ReturnValue for the call. */
V8_INLINE ReturnValue<T> GetReturnValue() const;
// This shouldn't be public, but the arm compiler needs it.
static const int kArgsLength = 6;
protected:
private:
friend class internal::FunctionCallbackArguments;
friend class internal::CustomArguments<FunctionCallbackInfo>;
friend class debug::ConsoleCallArguments;
static const int kHolderIndex = 0;
static const int kIsolateIndex = 1;
static const int kReturnValueDefaultValueIndex = 2;
static const int kReturnValueIndex = 3;
static const int kDataIndex = 4;
static const int kNewTargetIndex = 5;
friend class internal::Builtins;
static constexpr int kHolderIndex = 0;
static constexpr int kIsolateIndex = 1;
static constexpr int kReturnValueDefaultValueIndex = 2;
static constexpr int kReturnValueIndex = 3;
static constexpr int kDataIndex = 4;
static constexpr int kNewTargetIndex = 5;
static constexpr int kArgsLength = 6;
static constexpr int kArgsLengthWithReceiver = 7;
// Codegen constants:
static constexpr int kSize = 3 * internal::kApiSystemPointerSize;
static constexpr int kImplicitArgsOffset = 0;
static constexpr int kValuesOffset =
kImplicitArgsOffset + internal::kApiSystemPointerSize;
static constexpr int kLengthOffset =
kValuesOffset + internal::kApiSystemPointerSize;
static constexpr int kThisValuesIndex = -1;
static_assert(ReturnValue<Value>::kDefaultValueValueIndex ==
kReturnValueDefaultValueIndex - kReturnValueIndex);
static_assert(ReturnValue<Value>::kIsolateValueIndex ==
kIsolateIndex - kReturnValueIndex);
V8_INLINE FunctionCallbackInfo(internal::Address* implicit_args,
internal::Address* values, int length);
@ -229,22 +251,24 @@ class PropertyCallbackInfo {
*/
V8_INLINE bool ShouldThrowOnError() const;
// This shouldn't be public, but the arm compiler needs it.
static const int kArgsLength = 7;
protected:
private:
friend class MacroAssembler;
friend class internal::PropertyCallbackArguments;
friend class internal::CustomArguments<PropertyCallbackInfo>;
static const int kShouldThrowOnErrorIndex = 0;
static const int kHolderIndex = 1;
static const int kIsolateIndex = 2;
static const int kReturnValueDefaultValueIndex = 3;
static const int kReturnValueIndex = 4;
static const int kDataIndex = 5;
static const int kThisIndex = 6;
static constexpr int kShouldThrowOnErrorIndex = 0;
static constexpr int kHolderIndex = 1;
static constexpr int kIsolateIndex = 2;
static constexpr int kReturnValueDefaultValueIndex = 3;
static constexpr int kReturnValueIndex = 4;
static constexpr int kDataIndex = 5;
static constexpr int kThisIndex = 6;
V8_INLINE PropertyCallbackInfo(internal::Address* args) : args_(args) {}
static constexpr int kArgsLength = 7;
static constexpr int kSize = 1 * internal::kApiSystemPointerSize;
V8_INLINE explicit PropertyCallbackInfo(internal::Address* args)
: args_(args) {}
internal::Address* args_;
};
@ -285,7 +309,7 @@ void ReturnValue<T>::Set(const Local<S> handle) {
if (V8_UNLIKELY(handle.IsEmpty())) {
*value_ = GetDefaultValue();
} else {
*value_ = *reinterpret_cast<internal::Address*>(*handle);
*value_ = internal::ValueHelper::ValueAsAddress(*handle);
}
}
@ -328,41 +352,46 @@ void ReturnValue<T>::Set(bool value) {
} else {
root_index = I::kFalseValueRootIndex;
}
*value_ = *I::GetRoot(GetIsolate(), root_index);
*value_ = I::GetRoot(GetIsolate(), root_index);
}
template <typename T>
void ReturnValue<T>::SetNull() {
static_assert(std::is_base_of<T, Primitive>::value, "type check");
using I = internal::Internals;
*value_ = *I::GetRoot(GetIsolate(), I::kNullValueRootIndex);
*value_ = I::GetRoot(GetIsolate(), I::kNullValueRootIndex);
}
template <typename T>
void ReturnValue<T>::SetUndefined() {
static_assert(std::is_base_of<T, Primitive>::value, "type check");
using I = internal::Internals;
*value_ = *I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex);
*value_ = I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex);
}
template <typename T>
void ReturnValue<T>::SetEmptyString() {
static_assert(std::is_base_of<T, String>::value, "type check");
using I = internal::Internals;
*value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
*value_ = I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
}
template <typename T>
Isolate* ReturnValue<T>::GetIsolate() const {
// Isolate is always the pointer below the default value on the stack.
return *reinterpret_cast<Isolate**>(&value_[-2]);
return *reinterpret_cast<Isolate**>(&value_[kIsolateValueIndex]);
}
template <typename T>
Local<Value> ReturnValue<T>::Get() const {
using I = internal::Internals;
if (*value_ == *I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex))
return Local<Value>(*Undefined(GetIsolate()));
#if V8_STATIC_ROOTS_BOOL
if (I::is_identical(*value_, I::StaticReadOnlyRoot::kTheHoleValue)) {
#else
if (*value_ == I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex)) {
#endif
return Undefined(GetIsolate());
}
return Local<Value>::New(GetIsolate(), reinterpret_cast<Value*>(value_));
}
@ -375,7 +404,7 @@ void ReturnValue<T>::Set(S* whatever) {
template <typename T>
internal::Address ReturnValue<T>::GetDefaultValue() {
// Default value is always the pointer below value_ on the stack.
return value_[-1];
return value_[kDefaultValueValueIndex];
}
template <typename T>
@ -387,31 +416,29 @@ FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Address* implicit_args,
template <typename T>
Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
// values_ points to the first argument (not the receiver).
if (i < 0 || length_ <= i) return Local<Value>(*Undefined(GetIsolate()));
return Local<Value>(reinterpret_cast<Value*>(values_ + i));
if (i < 0 || length_ <= i) return Undefined(GetIsolate());
return Local<Value>::FromSlot(values_ + i);
}
template <typename T>
Local<Object> FunctionCallbackInfo<T>::This() const {
// values_ points to the first argument (not the receiver).
return Local<Object>(reinterpret_cast<Object*>(values_ - 1));
return Local<Object>::FromSlot(values_ + kThisValuesIndex);
}
template <typename T>
Local<Object> FunctionCallbackInfo<T>::Holder() const {
return Local<Object>(
reinterpret_cast<Object*>(&implicit_args_[kHolderIndex]));
return Local<Object>::FromSlot(&implicit_args_[kHolderIndex]);
}
template <typename T>
Local<Value> FunctionCallbackInfo<T>::NewTarget() const {
return Local<Value>(
reinterpret_cast<Value*>(&implicit_args_[kNewTargetIndex]));
return Local<Value>::FromSlot(&implicit_args_[kNewTargetIndex]);
}
template <typename T>
Local<Value> FunctionCallbackInfo<T>::Data() const {
return Local<Value>(reinterpret_cast<Value*>(&implicit_args_[kDataIndex]));
return Local<Value>::FromSlot(&implicit_args_[kDataIndex]);
}
template <typename T>
@ -441,17 +468,17 @@ Isolate* PropertyCallbackInfo<T>::GetIsolate() const {
template <typename T>
Local<Value> PropertyCallbackInfo<T>::Data() const {
return Local<Value>(reinterpret_cast<Value*>(&args_[kDataIndex]));
return Local<Value>::FromSlot(&args_[kDataIndex]);
}
template <typename T>
Local<Object> PropertyCallbackInfo<T>::This() const {
return Local<Object>(reinterpret_cast<Object*>(&args_[kThisIndex]));
return Local<Object>::FromSlot(&args_[kThisIndex]);
}
template <typename T>
Local<Object> PropertyCallbackInfo<T>::Holder() const {
return Local<Object>(reinterpret_cast<Object*>(&args_[kHolderIndex]));
return Local<Object>::FromSlot(&args_[kHolderIndex]);
}
template <typename T>

View file

@ -59,6 +59,7 @@ class V8_EXPORT Function : public Object {
void SetName(Local<String> name);
Local<Value> GetName() const;
V8_DEPRECATED("No direct replacement")
MaybeLocal<UnboundScript> GetUnboundScript() const;
/**

View file

@ -32,19 +32,19 @@ namespace Debugger {
namespace API {
class SearchMatch;
}
}
} // namespace Debugger
namespace Runtime {
namespace API {
class RemoteObject;
class StackTrace;
class StackTraceId;
}
}
} // namespace API
} // namespace Runtime
namespace Schema {
namespace API {
class Domain;
}
}
} // namespace Schema
} // namespace protocol
class V8_EXPORT StringView {
@ -134,6 +134,13 @@ class V8_EXPORT V8DebuggerId {
int64_t m_second = 0;
};
struct V8_EXPORT V8StackFrame {
StringView sourceURL;
StringView functionName;
int lineNumber;
int columnNumber;
};
class V8_EXPORT V8StackTrace {
public:
virtual StringView firstNonEmptySourceURL() const = 0;
@ -151,6 +158,8 @@ class V8_EXPORT V8StackTrace {
// Safe to pass between threads, drops async chain.
virtual std::unique_ptr<V8StackTrace> clone() = 0;
virtual std::vector<V8StackFrame> frames() const = 0;
};
class V8_EXPORT V8InspectorSession {
@ -203,6 +212,9 @@ class V8_EXPORT V8InspectorSession {
std::unique_ptr<StringBuffer>* objectGroup) = 0;
virtual void releaseObjectGroup(StringView) = 0;
virtual void triggerPreciseCoverageDeltaUpdate(StringView occasion) = 0;
// Prepare for shutdown (disables debugger pausing, etc.).
virtual void stop() = 0;
};
class V8_EXPORT WebDriverValue {
@ -365,9 +377,12 @@ class V8_EXPORT V8Inspector {
virtual void flushProtocolNotifications() = 0;
};
enum ClientTrustLevel { kUntrusted, kFullyTrusted };
enum SessionPauseState { kWaitingForDebugger, kNotWaitingForDebugger };
// TODO(chromium:1352175): remove default value once downstream change lands.
virtual std::unique_ptr<V8InspectorSession> connect(
int contextGroupId, Channel*, StringView state,
ClientTrustLevel client_trust_level) {
ClientTrustLevel client_trust_level,
SessionPauseState = kNotWaitingForDebugger) {
return nullptr;
}

View file

@ -21,15 +21,13 @@ class Array;
class Context;
class Data;
class Isolate;
template <typename T>
class Local;
namespace internal {
class Isolate;
typedef uintptr_t Address;
static const Address kNullAddress = 0;
static constexpr Address kNullAddress = 0;
constexpr int KB = 1024;
constexpr int MB = KB * 1024;
@ -82,7 +80,7 @@ struct SmiTagging<4> {
static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
V8_INLINE static int SmiToInt(const internal::Address value) {
V8_INLINE static int SmiToInt(Address value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Truncate and shift down (requires >> to be sign extending).
return static_cast<int32_t>(static_cast<uint32_t>(value)) >> shift_bits;
@ -107,7 +105,7 @@ struct SmiTagging<8> {
static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
V8_INLINE static int SmiToInt(const internal::Address value) {
V8_INLINE static int SmiToInt(Address value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits.
return static_cast<int>(static_cast<intptr_t>(value) >> shift_bits);
@ -150,8 +148,9 @@ const int kSmiMinValue = static_cast<int>(PlatformSmiTagging::kSmiMinValue);
const int kSmiMaxValue = static_cast<int>(PlatformSmiTagging::kSmiMaxValue);
constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
constexpr bool Is64() { return kApiSystemPointerSize == sizeof(int64_t); }
V8_INLINE static constexpr internal::Address IntToSmi(int value) {
V8_INLINE static constexpr Address IntToSmi(int value) {
return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) |
kSmiTag;
}
@ -242,6 +241,7 @@ static_assert(1ULL << (64 - kBoundedSizeShift) ==
#ifdef V8_COMPRESS_POINTERS
#ifdef V8_TARGET_OS_ANDROID
// The size of the virtual memory reservation for an external pointer table.
// This determines the maximum number of entries in a table. Using a maximum
// size allows omitting bounds checks on table accesses if the indices are
@ -249,14 +249,18 @@ static_assert(1ULL << (64 - kBoundedSizeShift) ==
// value must be a power of two.
static const size_t kExternalPointerTableReservationSize = 512 * MB;
// The maximum number of entries in an external pointer table.
static const size_t kMaxExternalPointers =
kExternalPointerTableReservationSize / kApiSystemPointerSize;
// The external pointer table indices stored in HeapObjects as external
// pointers are shifted to the left by this amount to guarantee that they are
// smaller than the maximum table size.
static const uint32_t kExternalPointerIndexShift = 6;
#else
static const size_t kExternalPointerTableReservationSize = 1024 * MB;
static const uint32_t kExternalPointerIndexShift = 5;
#endif // V8_TARGET_OS_ANDROID
// The maximum number of entries in an external pointer table.
static const size_t kMaxExternalPointers =
kExternalPointerTableReservationSize / kApiSystemPointerSize;
static_assert((1 << (32 - kExternalPointerIndexShift)) == kMaxExternalPointers,
"kExternalPointerTableReservationSize and "
"kExternalPointerIndexShift don't match");
@ -345,6 +349,14 @@ using ExternalPointer_t = Address;
// that the Embedder is not using this byte (really only this one bit) for any
// other purpose. This bit also does not collide with the memory tagging
// extension (MTE) which would use bits [56, 60).
//
// External pointer tables are also available even when the sandbox is off but
// pointer compression is on. In that case, the mechanism can be used to easy
// alignment requirements as it turns unaligned 64-bit raw pointers into
// aligned 32-bit indices. To "opt-in" to the external pointer table mechanism
// for this purpose, instead of using the ExternalPointer accessors one needs to
// use ExternalPointerHandles directly and use them to access the pointers in an
// ExternalPointerTable.
constexpr uint64_t kExternalPointerMarkBit = 1ULL << 62;
constexpr uint64_t kExternalPointerTagMask = 0x40ff000000000000;
constexpr uint64_t kExternalPointerTagShift = 48;
@ -367,71 +379,58 @@ constexpr uint64_t kAllExternalPointerTypeTags[] = {
0b11001100, 0b11010001, 0b11010010, 0b11010100, 0b11011000, 0b11100001,
0b11100010, 0b11100100, 0b11101000, 0b11110000};
#define TAG(i) \
((kAllExternalPointerTypeTags[i] << kExternalPointerTagShift) | \
kExternalPointerMarkBit)
// clang-format off
// New entries should be added with state "sandboxed".
// When adding new tags, please ensure that the code using these tags is
// "substitution-safe", i.e. still operate safely if external pointers of the
// same type are swapped by an attacker. See comment above for more details.
#define TAG(i) (kAllExternalPointerTypeTags[i])
// Shared external pointers are owned by the shared Isolate and stored in the
// shared external pointer table associated with that Isolate, where they can
// be accessed from multiple threads at the same time. The objects referenced
// in this way must therefore always be thread-safe.
#define SHARED_EXTERNAL_POINTER_TAGS(V) \
V(kFirstSharedTag, sandboxed, TAG(0)) \
V(kWaiterQueueNodeTag, sandboxed, TAG(0)) \
V(kExternalStringResourceTag, sandboxed, TAG(1)) \
V(kExternalStringResourceDataTag, sandboxed, TAG(2)) \
V(kLastSharedTag, sandboxed, TAG(2))
#define SHARED_EXTERNAL_POINTER_TAGS(V) \
V(kFirstSharedTag, TAG(0)) \
V(kWaiterQueueNodeTag, TAG(0)) \
V(kExternalStringResourceTag, TAG(1)) \
V(kExternalStringResourceDataTag, TAG(2)) \
V(kLastSharedTag, TAG(2))
// External pointers using these tags are kept in a per-Isolate external
// pointer table and can only be accessed when this Isolate is active.
#define PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) \
V(kForeignForeignAddressTag, sandboxed, TAG(10)) \
V(kNativeContextMicrotaskQueueTag, sandboxed, TAG(11)) \
V(kEmbedderDataSlotPayloadTag, sandboxed, TAG(12)) \
V(kExternalObjectValueTag, sandboxed, TAG(13)) \
V(kCallHandlerInfoCallbackTag, sandboxed, TAG(14)) \
V(kAccessorInfoGetterTag, sandboxed, TAG(15)) \
V(kAccessorInfoSetterTag, sandboxed, TAG(16)) \
V(kWasmInternalFunctionCallTargetTag, sandboxed, TAG(17)) \
V(kWasmTypeInfoNativeTypeTag, sandboxed, TAG(18)) \
V(kWasmExportedFunctionDataSignatureTag, sandboxed, TAG(19)) \
V(kWasmContinuationJmpbufTag, sandboxed, TAG(20)) \
V(kArrayBufferExtensionTag, sandboxed, TAG(21))
#define PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) \
V(kForeignForeignAddressTag, TAG(10)) \
V(kNativeContextMicrotaskQueueTag, TAG(11)) \
V(kEmbedderDataSlotPayloadTag, TAG(12)) \
/* This tag essentially stands for a `void*` pointer in the V8 API, and */ \
/* it is the Embedder's responsibility to ensure type safety (against */ \
/* substitution) and lifetime validity of these objects. */ \
V(kExternalObjectValueTag, TAG(13)) \
V(kCallHandlerInfoCallbackTag, TAG(14)) \
V(kAccessorInfoGetterTag, TAG(15)) \
V(kAccessorInfoSetterTag, TAG(16)) \
V(kWasmInternalFunctionCallTargetTag, TAG(17)) \
V(kWasmTypeInfoNativeTypeTag, TAG(18)) \
V(kWasmExportedFunctionDataSignatureTag, TAG(19)) \
V(kWasmContinuationJmpbufTag, TAG(20)) \
V(kArrayBufferExtensionTag, TAG(21))
// All external pointer tags.
#define ALL_EXTERNAL_POINTER_TAGS(V) \
SHARED_EXTERNAL_POINTER_TAGS(V) \
PER_ISOLATE_EXTERNAL_POINTER_TAGS(V)
// When the sandbox is enabled, external pointers marked as "sandboxed" above
// use the external pointer table (i.e. are sandboxed). This allows a gradual
// rollout of external pointer sandboxing. If the sandbox is off, no external
// pointers are sandboxed.
//
// Sandboxed external pointer tags are available when compressing pointers even
// when the sandbox is off. Some tags (e.g. kWaiterQueueNodeTag) are used
// manually with the external pointer table even when the sandbox is off to ease
// alignment requirements.
#define sandboxed(X) (X << kExternalPointerTagShift) | kExternalPointerMarkBit
#define unsandboxed(X) kUnsandboxedExternalPointerTag
#if defined(V8_COMPRESS_POINTERS)
#define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = State(Bits),
#else
#define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = unsandboxed(Bits),
#endif
#define EXTERNAL_POINTER_TAG_ENUM(Name, Tag) Name = Tag,
#define MAKE_TAG(HasMarkBit, TypeTag) \
((static_cast<uint64_t>(TypeTag) << kExternalPointerTagShift) | \
(HasMarkBit ? kExternalPointerMarkBit : 0))
enum ExternalPointerTag : uint64_t {
// Empty tag value. Mostly used as placeholder.
kExternalPointerNullTag = MAKE_TAG(0, 0b00000000),
// Tag to use for unsandboxed external pointers, which are still stored as
// raw pointers on the heap.
kUnsandboxedExternalPointerTag = MAKE_TAG(0, 0b00000000),
// External pointer tag that will match any external pointer. Use with care!
kAnyExternalPointerTag = MAKE_TAG(1, 0b11111111),
// The free entry tag has all type bits set so every type check with a
@ -445,20 +444,11 @@ enum ExternalPointerTag : uint64_t {
};
#undef MAKE_TAG
#undef unsandboxed
#undef sandboxed
#undef TAG
#undef EXTERNAL_POINTER_TAG_ENUM
// clang-format on
// True if the external pointer is sandboxed and so must be referenced through
// an external pointer table.
V8_INLINE static constexpr bool IsSandboxedExternalPointerType(
ExternalPointerTag tag) {
return tag != kUnsandboxedExternalPointerTag;
}
// True if the external pointer must be accessed from the shared isolate's
// external pointer table.
V8_INLINE static constexpr bool IsSharedExternalPointerType(
@ -467,12 +457,10 @@ V8_INLINE static constexpr bool IsSharedExternalPointerType(
}
// Sanity checks.
#define CHECK_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
static_assert(!IsSandboxedExternalPointerType(Tag) || \
IsSharedExternalPointerType(Tag));
#define CHECK_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
static_assert(IsSharedExternalPointerType(Tag));
#define CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
static_assert(!IsSandboxedExternalPointerType(Tag) || \
!IsSharedExternalPointerType(Tag));
static_assert(!IsSharedExternalPointerType(Tag));
SHARED_EXTERNAL_POINTER_TAGS(CHECK_SHARED_EXTERNAL_POINTER_TAGS)
PER_ISOLATE_EXTERNAL_POINTER_TAGS(CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS)
@ -490,7 +478,7 @@ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
// Returns if we need to throw when an error occurs. This infers the language
// mode based on the current context and the closure. This returns true if the
// language mode is strict.
V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
V8_EXPORT bool ShouldThrowOnError(internal::Isolate* isolate);
/**
* This class exports constants and functionality from within v8 that
* is necessary to implement inline functions in the v8 api. Don't
@ -498,8 +486,7 @@ V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
*/
class Internals {
#ifdef V8_MAP_PACKING
V8_INLINE static constexpr internal::Address UnpackMapWord(
internal::Address mapword) {
V8_INLINE static constexpr Address UnpackMapWord(Address mapword) {
// TODO(wenyuzhao): Clear header metadata.
return mapword ^ kMapWordXorMask;
}
@ -533,6 +520,8 @@ class Internals {
static const int kStackGuardSize = 7 * kApiSystemPointerSize;
static const int kBuiltinTier0EntryTableSize = 7 * kApiSystemPointerSize;
static const int kBuiltinTier0TableSize = 7 * kApiSystemPointerSize;
static const int kLinearAllocationAreaSize = 3 * kApiSystemPointerSize;
static const int kThreadLocalTopSize = 25 * kApiSystemPointerSize;
// ExternalPointerTable layout guarantees.
static const int kExternalPointerTableBufferOffset = 0;
@ -545,31 +534,60 @@ class Internals {
static const int kVariousBooleanFlagsOffset =
kIsolateStackGuardOffset + kStackGuardSize;
static const int kBuiltinTier0EntryTableOffset =
kVariousBooleanFlagsOffset + kApiSystemPointerSize;
kVariousBooleanFlagsOffset + 8;
static const int kBuiltinTier0TableOffset =
kBuiltinTier0EntryTableOffset + kBuiltinTier0EntryTableSize;
static const int kIsolateEmbedderDataOffset =
static const int kNewAllocationInfoOffset =
kBuiltinTier0TableOffset + kBuiltinTier0TableSize;
static const int kOldAllocationInfoOffset =
kNewAllocationInfoOffset + kLinearAllocationAreaSize;
static const int kIsolateFastCCallCallerFpOffset =
kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
kOldAllocationInfoOffset + kLinearAllocationAreaSize;
static const int kIsolateFastCCallCallerPcOffset =
kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
static const int kIsolateFastApiCallTargetOffset =
kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
static const int kIsolateLongTaskStatsCounterOffset =
kIsolateFastApiCallTargetOffset + kApiSystemPointerSize;
static const int kIsolateThreadLocalTopOffset =
kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
static const int kIsolateEmbedderDataOffset =
kIsolateThreadLocalTopOffset + kThreadLocalTopSize;
#ifdef V8_COMPRESS_POINTERS
static const int kIsolateExternalPointerTableOffset =
kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
static const int kIsolateSharedExternalPointerTableAddressOffset =
kIsolateExternalPointerTableOffset + kExternalPointerTableSize;
static const int kIsolateRootsOffset =
kIsolateSharedExternalPointerTableAddressOffset + kApiSystemPointerSize;
#else
static const int kIsolateRootsOffset =
kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
#endif
#if V8_STATIC_ROOTS_BOOL
// These constants need to be initialized in api.cc.
#define EXPORTED_STATIC_ROOTS_PTR_LIST(V) \
V(UndefinedValue) \
V(NullValue) \
V(TrueValue) \
V(FalseValue) \
V(EmptyString) \
V(TheHoleValue)
using Tagged_t = uint32_t;
struct StaticReadOnlyRoot {
#define DEF_ROOT(name) V8_EXPORT static const Tagged_t k##name;
EXPORTED_STATIC_ROOTS_PTR_LIST(DEF_ROOT)
#undef DEF_ROOT
V8_EXPORT static const Tagged_t kFirstStringMap;
V8_EXPORT static const Tagged_t kLastStringMap;
};
#endif // V8_STATIC_ROOTS_BOOL
static const int kUndefinedValueRootIndex = 4;
static const int kTheHoleValueRootIndex = 5;
static const int kNullValueRootIndex = 6;
@ -623,15 +641,15 @@ class Internals {
#endif
}
V8_INLINE static bool HasHeapObjectTag(const internal::Address value) {
V8_INLINE static bool HasHeapObjectTag(Address value) {
return (value & kHeapObjectTagMask) == static_cast<Address>(kHeapObjectTag);
}
V8_INLINE static int SmiValue(const internal::Address value) {
V8_INLINE static int SmiValue(Address value) {
return PlatformSmiTagging::SmiToInt(value);
}
V8_INLINE static constexpr internal::Address IntToSmi(int value) {
V8_INLINE static constexpr Address IntToSmi(int value) {
return internal::IntToSmi(value);
}
@ -639,16 +657,30 @@ class Internals {
return PlatformSmiTagging::IsValidSmi(value);
}
V8_INLINE static int GetInstanceType(const internal::Address obj) {
typedef internal::Address A;
A map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
#if V8_STATIC_ROOTS_BOOL
V8_INLINE static bool is_identical(Address obj, Tagged_t constant) {
return static_cast<Tagged_t>(obj) == constant;
}
V8_INLINE static bool CheckInstanceMapRange(Address obj, Tagged_t first_map,
Tagged_t last_map) {
auto map = ReadRawField<Tagged_t>(obj, kHeapObjectMapOffset);
#ifdef V8_MAP_PACKING
map = UnpackMapWord(map);
#endif
return map >= first_map && map <= last_map;
}
#endif
V8_INLINE static int GetInstanceType(Address obj) {
Address map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
#ifdef V8_MAP_PACKING
map = UnpackMapWord(map);
#endif
return ReadRawField<uint16_t>(map, kMapInstanceTypeOffset);
}
V8_INLINE static int GetOddballKind(const internal::Address obj) {
V8_INLINE static int GetOddballKind(Address obj) {
return SmiValue(ReadTaggedSignedField(obj, kOddballKindOffset));
}
@ -669,80 +701,92 @@ class Internals {
static_cast<unsigned>(kLastJSApiObjectType - kJSObjectType));
}
V8_INLINE static uint8_t GetNodeFlag(internal::Address* obj, int shift) {
V8_INLINE static uint8_t GetNodeFlag(Address* obj, int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & static_cast<uint8_t>(1U << shift);
}
V8_INLINE static void UpdateNodeFlag(internal::Address* obj, bool value,
int shift) {
V8_INLINE static void UpdateNodeFlag(Address* obj, bool value, int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
uint8_t mask = static_cast<uint8_t>(1U << shift);
*addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
}
V8_INLINE static uint8_t GetNodeState(internal::Address* obj) {
V8_INLINE static uint8_t GetNodeState(Address* obj) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & kNodeStateMask;
}
V8_INLINE static void UpdateNodeState(internal::Address* obj, uint8_t value) {
V8_INLINE static void UpdateNodeState(Address* obj, uint8_t value) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
*addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
}
V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot,
void* data) {
internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
kIsolateEmbedderDataOffset +
slot * kApiSystemPointerSize;
Address addr = reinterpret_cast<Address>(isolate) +
kIsolateEmbedderDataOffset + slot * kApiSystemPointerSize;
*reinterpret_cast<void**>(addr) = data;
}
V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
uint32_t slot) {
internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
kIsolateEmbedderDataOffset +
slot * kApiSystemPointerSize;
Address addr = reinterpret_cast<Address>(isolate) +
kIsolateEmbedderDataOffset + slot * kApiSystemPointerSize;
return *reinterpret_cast<void* const*>(addr);
}
V8_INLINE static void IncrementLongTasksStatsCounter(v8::Isolate* isolate) {
internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
kIsolateLongTaskStatsCounterOffset;
Address addr =
reinterpret_cast<Address>(isolate) + kIsolateLongTaskStatsCounterOffset;
++(*reinterpret_cast<size_t*>(addr));
}
V8_INLINE static internal::Address* GetRoot(v8::Isolate* isolate, int index) {
internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
kIsolateRootsOffset +
index * kApiSystemPointerSize;
return reinterpret_cast<internal::Address*>(addr);
V8_INLINE static Address* GetRootSlot(v8::Isolate* isolate, int index) {
Address addr = reinterpret_cast<Address>(isolate) + kIsolateRootsOffset +
index * kApiSystemPointerSize;
return reinterpret_cast<Address*>(addr);
}
V8_INLINE static Address GetRoot(v8::Isolate* isolate, int index) {
#if V8_STATIC_ROOTS_BOOL
Address base = *reinterpret_cast<Address*>(
reinterpret_cast<uintptr_t>(isolate) + kIsolateCageBaseOffset);
switch (index) {
#define DECOMPRESS_ROOT(name) \
case k##name##RootIndex: \
return base + StaticReadOnlyRoot::k##name;
EXPORTED_STATIC_ROOTS_PTR_LIST(DECOMPRESS_ROOT)
#undef DECOMPRESS_ROOT
default:
break;
}
#undef EXPORTED_STATIC_ROOTS_PTR_LIST
#endif // V8_STATIC_ROOTS_BOOL
return *GetRootSlot(isolate, index);
}
#ifdef V8_ENABLE_SANDBOX
V8_INLINE static internal::Address* GetExternalPointerTableBase(
v8::Isolate* isolate) {
internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
kIsolateExternalPointerTableOffset +
kExternalPointerTableBufferOffset;
return *reinterpret_cast<internal::Address**>(addr);
V8_INLINE static Address* GetExternalPointerTableBase(v8::Isolate* isolate) {
Address addr = reinterpret_cast<Address>(isolate) +
kIsolateExternalPointerTableOffset +
kExternalPointerTableBufferOffset;
return *reinterpret_cast<Address**>(addr);
}
V8_INLINE static internal::Address* GetSharedExternalPointerTableBase(
V8_INLINE static Address* GetSharedExternalPointerTableBase(
v8::Isolate* isolate) {
internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
kIsolateSharedExternalPointerTableAddressOffset;
addr = *reinterpret_cast<internal::Address*>(addr);
Address addr = reinterpret_cast<Address>(isolate) +
kIsolateSharedExternalPointerTableAddressOffset;
addr = *reinterpret_cast<Address*>(addr);
addr += kExternalPointerTableBufferOffset;
return *reinterpret_cast<internal::Address**>(addr);
return *reinterpret_cast<Address**>(addr);
}
#endif
template <typename T>
V8_INLINE static T ReadRawField(internal::Address heap_object_ptr,
int offset) {
internal::Address addr = heap_object_ptr + offset - kHeapObjectTag;
V8_INLINE static T ReadRawField(Address heap_object_ptr, int offset) {
Address addr = heap_object_ptr + offset - kHeapObjectTag;
#ifdef V8_COMPRESS_POINTERS
if (sizeof(T) > kApiTaggedSize) {
// TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
@ -757,29 +801,28 @@ class Internals {
return *reinterpret_cast<const T*>(addr);
}
V8_INLINE static internal::Address ReadTaggedPointerField(
internal::Address heap_object_ptr, int offset) {
V8_INLINE static Address ReadTaggedPointerField(Address heap_object_ptr,
int offset) {
#ifdef V8_COMPRESS_POINTERS
uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
internal::Address base =
GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
return base + static_cast<Address>(static_cast<uintptr_t>(value));
#else
return ReadRawField<internal::Address>(heap_object_ptr, offset);
return ReadRawField<Address>(heap_object_ptr, offset);
#endif
}
V8_INLINE static internal::Address ReadTaggedSignedField(
internal::Address heap_object_ptr, int offset) {
V8_INLINE static Address ReadTaggedSignedField(Address heap_object_ptr,
int offset) {
#ifdef V8_COMPRESS_POINTERS
uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
return static_cast<internal::Address>(static_cast<uintptr_t>(value));
return static_cast<Address>(static_cast<uintptr_t>(value));
#else
return ReadRawField<internal::Address>(heap_object_ptr, offset);
return ReadRawField<Address>(heap_object_ptr, offset);
#endif
}
V8_INLINE static v8::Isolate* GetIsolateForSandbox(internal::Address obj) {
V8_INLINE static v8::Isolate* GetIsolateForSandbox(Address obj) {
#ifdef V8_ENABLE_SANDBOX
return reinterpret_cast<v8::Isolate*>(
internal::IsolateFromNeverReadOnlySpaceObject(obj));
@ -790,40 +833,37 @@ class Internals {
}
template <ExternalPointerTag tag>
V8_INLINE static internal::Address ReadExternalPointerField(
v8::Isolate* isolate, internal::Address heap_object_ptr, int offset) {
V8_INLINE static Address ReadExternalPointerField(v8::Isolate* isolate,
Address heap_object_ptr,
int offset) {
#ifdef V8_ENABLE_SANDBOX
if (IsSandboxedExternalPointerType(tag)) {
// See src/sandbox/external-pointer-table-inl.h. Logic duplicated here so
// it can be inlined and doesn't require an additional call.
internal::Address* table =
IsSharedExternalPointerType(tag)
? GetSharedExternalPointerTableBase(isolate)
: GetExternalPointerTableBase(isolate);
internal::ExternalPointerHandle handle =
ReadRawField<ExternalPointerHandle>(heap_object_ptr, offset);
uint32_t index = handle >> kExternalPointerIndexShift;
std::atomic<internal::Address>* ptr =
reinterpret_cast<std::atomic<internal::Address>*>(&table[index]);
internal::Address entry =
std::atomic_load_explicit(ptr, std::memory_order_relaxed);
return entry & ~tag;
}
#endif
static_assert(tag != kExternalPointerNullTag);
// See src/sandbox/external-pointer-table-inl.h. Logic duplicated here so
// it can be inlined and doesn't require an additional call.
Address* table = IsSharedExternalPointerType(tag)
? GetSharedExternalPointerTableBase(isolate)
: GetExternalPointerTableBase(isolate);
internal::ExternalPointerHandle handle =
ReadRawField<ExternalPointerHandle>(heap_object_ptr, offset);
uint32_t index = handle >> kExternalPointerIndexShift;
std::atomic<Address>* ptr =
reinterpret_cast<std::atomic<Address>*>(&table[index]);
Address entry = std::atomic_load_explicit(ptr, std::memory_order_relaxed);
return entry & ~tag;
#else
return ReadRawField<Address>(heap_object_ptr, offset);
#endif // V8_ENABLE_SANDBOX
}
#ifdef V8_COMPRESS_POINTERS
V8_INLINE static internal::Address GetPtrComprCageBaseFromOnHeapAddress(
internal::Address addr) {
V8_INLINE static Address GetPtrComprCageBaseFromOnHeapAddress(Address addr) {
return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
}
V8_INLINE static internal::Address DecompressTaggedAnyField(
internal::Address heap_object_ptr, uint32_t value) {
internal::Address base =
GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
V8_INLINE static Address DecompressTaggedField(Address heap_object_ptr,
uint32_t value) {
Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
return base + static_cast<Address>(static_cast<uintptr_t>(value));
}
#endif // V8_COMPRESS_POINTERS
@ -861,8 +901,58 @@ class BackingStoreBase {};
// This is needed for histograms sampling garbage collection reasons.
constexpr int kGarbageCollectionReasonMaxValue = 27;
} // namespace internal
// Helper functions about values contained in handles.
class ValueHelper final {
public:
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
static constexpr Address kLocalTaggedNullAddress = 1;
template <typename T>
static constexpr T* EmptyValue() {
return reinterpret_cast<T*>(kLocalTaggedNullAddress);
}
template <typename T>
V8_INLINE static Address ValueAsAddress(const T* value) {
return reinterpret_cast<Address>(value);
}
template <typename T, typename S>
V8_INLINE static T* SlotAsValue(S* slot) {
return *reinterpret_cast<T**>(slot);
}
template <typename T>
V8_INLINE static T* ValueAsSlot(T* const& value) {
return reinterpret_cast<T*>(const_cast<T**>(&value));
}
#else // !V8_ENABLE_CONSERVATIVE_STACK_SCANNING
template <typename T>
static constexpr T* EmptyValue() {
return nullptr;
}
template <typename T>
V8_INLINE static Address ValueAsAddress(const T* value) {
return *reinterpret_cast<const Address*>(value);
}
template <typename T, typename S>
V8_INLINE static T* SlotAsValue(S* slot) {
return reinterpret_cast<T*>(slot);
}
template <typename T>
V8_INLINE static T* ValueAsSlot(T* const& value) {
return value;
}
#endif // V8_ENABLE_CONSERVATIVE_STACK_SCANNING
};
} // namespace internal
} // namespace v8
#endif // INCLUDE_V8_INTERNAL_H_

View file

@ -233,7 +233,7 @@ class V8_EXPORT Isolate {
* Explicitly specify a startup snapshot blob. The embedder owns the blob.
* The embedder *must* ensure that the snapshot is from a trusted source.
*/
StartupData* snapshot_blob = nullptr;
const StartupData* snapshot_blob = nullptr;
/**
* Enables the host application to provide a mechanism for recording
@ -333,12 +333,9 @@ class V8_EXPORT Isolate {
const DisallowJavascriptExecutionScope&) = delete;
private:
OnFailure on_failure_;
v8::Isolate* v8_isolate_;
bool was_execution_allowed_assert_;
bool was_execution_allowed_throws_;
bool was_execution_allowed_dump_;
v8::Isolate* const v8_isolate_;
const OnFailure on_failure_;
bool was_execution_allowed_;
};
/**
@ -356,7 +353,7 @@ class V8_EXPORT Isolate {
const AllowJavascriptExecutionScope&) = delete;
private:
Isolate* v8_isolate_;
Isolate* const v8_isolate_;
bool was_execution_allowed_assert_;
bool was_execution_allowed_throws_;
bool was_execution_allowed_dump_;
@ -537,6 +534,8 @@ class V8_EXPORT Isolate {
kTurboFanOsrCompileStarted = 115,
kAsyncStackTaggingCreateTaskCall = 116,
kDurationFormat = 117,
kInvalidatedNumberStringPrototypeNoReplaceProtector = 118,
kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode = 119, // Unused.
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
@ -924,27 +923,10 @@ class V8_EXPORT Isolate {
void RemoveGCPrologueCallback(GCCallbackWithData, void* data = nullptr);
void RemoveGCPrologueCallback(GCCallback callback);
START_ALLOW_USE_DEPRECATED()
/**
* Sets the embedder heap tracer for the isolate.
* SetEmbedderHeapTracer cannot be used simultaneously with AttachCppHeap.
*/
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
/*
* Gets the currently active heap tracer for the isolate that was set with
* SetEmbedderHeapTracer.
*/
EmbedderHeapTracer* GetEmbedderHeapTracer();
END_ALLOW_USE_DEPRECATED()
/**
* Sets an embedder roots handle that V8 should consider when performing
* non-unified heap garbage collections.
*
* Using only EmbedderHeapTracer automatically sets up a default handler.
* The intended use case is for setting a custom handler after invoking
* `AttachCppHeap()`.
* non-unified heap garbage collections. The intended use case is for setting
* a custom handler after invoking `AttachCppHeap()`.
*
* V8 does not take ownership of the handler.
*/
@ -955,8 +937,6 @@ class V8_EXPORT Isolate {
* embedder maintains ownership of the CppHeap. At most one C++ heap can be
* attached to V8.
*
* AttachCppHeap cannot be used simultaneously with SetEmbedderHeapTracer.
*
* Multi-threaded use requires the use of v8::Locker/v8::Unlocker, see
* CppHeap.
*/
@ -1143,9 +1123,8 @@ class V8_EXPORT Isolate {
*
* This should only be used for testing purposes and not to enforce a garbage
* collection schedule. It has strong negative impact on the garbage
* collection performance. Use IdleNotificationDeadline() or
* LowMemoryNotification() instead to influence the garbage collection
* schedule.
* collection performance. Use MemoryPressureNotification() instead to
* influence the garbage collection schedule.
*/
void RequestGarbageCollectionForTesting(GarbageCollectionType type);
@ -1156,9 +1135,8 @@ class V8_EXPORT Isolate {
*
* This should only be used for testing purposes and not to enforce a garbage
* collection schedule. It has strong negative impact on the garbage
* collection performance. Use IdleNotificationDeadline() or
* LowMemoryNotification() instead to influence the garbage collection
* schedule.
* collection performance. Use MemoryPressureNotification() instead to
* influence the garbage collection schedule.
*/
void RequestGarbageCollectionForTesting(GarbageCollectionType type,
StackState stack_state);
@ -1310,6 +1288,8 @@ class V8_EXPORT Isolate {
* that function. There is no guarantee that the actual work will be done
* within the time limit.
*/
V8_DEPRECATE_SOON(
"Use MemoryPressureNotification() to influence the GC schedule.")
bool IdleNotificationDeadline(double deadline_in_seconds);
/**
@ -1346,11 +1326,13 @@ class V8_EXPORT Isolate {
* V8 uses this notification to guide heuristics which may result in a
* smaller memory footprint at the cost of reduced runtime performance.
*/
V8_DEPRECATED("Use IsolateInBackgroundNotification() instead")
void EnableMemorySavingsMode();
/**
* Optional notification which will disable the memory savings mode.
*/
V8_DEPRECATED("Use IsolateInBackgroundNotification() instead")
void DisableMemorySavingsMode();
/**
@ -1530,6 +1512,13 @@ class V8_EXPORT Isolate {
V8_DEPRECATED("Wasm exceptions are always enabled")
void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);
/**
* Register callback to control whehter Wasm GC is enabled.
* The callback overwrites the value of the flag.
* If the callback returns true, it will also enable Wasm stringrefs.
*/
void SetWasmGCEnabledCallback(WasmGCEnabledCallback callback);
void SetSharedArrayBufferConstructorEnabledCallback(
SharedArrayBufferConstructorEnabledCallback callback);
@ -1684,7 +1673,8 @@ uint32_t Isolate::GetNumberOfDataSlots() {
template <class T>
MaybeLocal<T> Isolate::GetDataFromSnapshotOnce(size_t index) {
T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
T* data =
internal::ValueHelper::SlotAsValue<T>(GetDataFromSnapshotOnce(index));
if (data) internal::PerformCastCheck(data);
return Local<T>(data);
}

View file

@ -50,9 +50,14 @@ class TracedReference;
class TracedReferenceBase;
class Utils;
namespace debug {
class ConsoleCallArguments;
}
namespace internal {
template <typename T>
class CustomArguments;
class SamplingHeapProfiler;
} // namespace internal
namespace api_internal {
@ -92,6 +97,9 @@ class V8_EXPORT V8_NODISCARD HandleScope {
HandleScope(const HandleScope&) = delete;
void operator=(const HandleScope&) = delete;
static internal::Address* CreateHandleForCurrentIsolate(
internal::Address value);
protected:
V8_INLINE HandleScope() = default;
@ -122,6 +130,33 @@ class V8_EXPORT V8_NODISCARD HandleScope {
friend class Context;
};
namespace internal {
/**
* Helper functions about handles.
*/
class HandleHelper final {
public:
/**
* Checks whether two handles are equal.
* They are equal iff they are both empty or they are both non-empty and the
* objects to which they refer are physically equal.
*
* If both handles refer to JS objects, this is the same as strict equality.
* For primitives, such as numbers or strings, a `false` return value does not
* indicate that the values aren't equal in the JavaScript sense.
* Use `Value::StrictEquals()` to check primitives for equality.
*/
template <typename T1, typename T2>
V8_INLINE static bool EqualHandles(const T1& lhs, const T2& rhs) {
if (lhs.IsEmpty()) return rhs.IsEmpty();
if (rhs.IsEmpty()) return false;
return lhs.address() == rhs.address();
}
};
} // namespace internal
/**
* An object reference managed by the v8 garbage collector.
*
@ -154,7 +189,8 @@ class V8_EXPORT V8_NODISCARD HandleScope {
template <class T>
class Local {
public:
V8_INLINE Local() : val_(nullptr) {}
V8_INLINE Local() : val_(internal::ValueHelper::EmptyValue<T>()) {}
template <class S>
V8_INLINE Local(Local<S> that) : val_(reinterpret_cast<T*>(*that)) {
/**
@ -168,55 +204,40 @@ class Local {
/**
* Returns true if the handle is empty.
*/
V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
V8_INLINE bool IsEmpty() const {
return val_ == internal::ValueHelper::EmptyValue<T>();
}
/**
* Sets the handle to be empty. IsEmpty() will then return true.
*/
V8_INLINE void Clear() { val_ = nullptr; }
V8_INLINE void Clear() { val_ = internal::ValueHelper::EmptyValue<T>(); }
V8_INLINE T* operator->() const { return val_; }
V8_INLINE T* operator*() const { return val_; }
/**
* Checks whether two handles are the same.
* Returns true if both are empty, or if the objects to which they refer
* are identical.
*
* If both handles refer to JS objects, this is the same as strict equality.
* For primitives, such as numbers or strings, a `false` return value does not
* indicate that the values aren't equal in the JavaScript sense.
* Use `Value::StrictEquals()` to check primitives for equality.
*/
template <class S>
V8_INLINE bool operator==(const Local<S>& that) const {
internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
if (a == nullptr) return b == nullptr;
if (b == nullptr) return false;
return *a == *b;
}
template <class S>
V8_INLINE bool operator==(const PersistentBase<S>& that) const {
internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
if (a == nullptr) return b == nullptr;
if (b == nullptr) return false;
return *a == *b;
}
/**
* Checks whether two handles are different.
* Returns true if only one of the handles is empty, or if
* the objects to which they refer are different.
* Checks whether two handles are equal or different.
* They are equal iff they are both empty or they are both non-empty and the
* objects to which they refer are physically equal.
*
* If both handles refer to JS objects, this is the same as strict
* non-equality. For primitives, such as numbers or strings, a `true` return
* value does not indicate that the values aren't equal in the JavaScript
* sense. Use `Value::StrictEquals()` to check primitives for equality.
*/
template <class S>
V8_INLINE bool operator==(const Local<S>& that) const {
return internal::HandleHelper::EqualHandles(*this, that);
}
template <class S>
V8_INLINE bool operator==(const PersistentBase<S>& that) const {
return internal::HandleHelper::EqualHandles(*this, that);
}
template <class S>
V8_INLINE bool operator!=(const Local<S>& that) const {
return !operator==(that);
@ -263,12 +284,12 @@ class Local {
V8_INLINE static Local<T> New(Isolate* isolate,
const PersistentBase<T>& that) {
return New(isolate, that.val_);
return New(isolate, internal::ValueHelper::SlotAsValue<T>(that.val_));
}
V8_INLINE static Local<T> New(Isolate* isolate,
const BasicTracedReference<T>& that) {
return New(isolate, *that);
return New(isolate, internal::ValueHelper::SlotAsValue<T>(*that));
}
private:
@ -277,12 +298,6 @@ class Local {
template <class F>
friend class Eternal;
template <class F>
friend class PersistentBase;
template <class F, class M>
friend class Persistent;
template <class F>
friend class Local;
template <class F>
friend class MaybeLocal;
template <class F>
friend class FunctionCallbackInfo;
@ -309,19 +324,31 @@ class Local {
friend class ReturnValue;
template <class F>
friend class Traced;
template <class F>
friend class BasicTracedReference;
template <class F>
friend class TracedReference;
friend class internal::SamplingHeapProfiler;
friend class internal::HandleHelper;
friend class debug::ConsoleCallArguments;
explicit V8_INLINE Local(T* that) : val_(that) {}
V8_INLINE internal::Address address() const {
return internal::ValueHelper::ValueAsAddress(val_);
}
V8_INLINE static Local<T> FromSlot(internal::Address* slot) {
return Local<T>(internal::ValueHelper::SlotAsValue<T>(slot));
}
V8_INLINE static Local<T> New(Isolate* isolate, T* that) {
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
return Local<T>(that);
#else
if (that == nullptr) return Local<T>();
T* that_ptr = that;
internal::Address* p = reinterpret_cast<internal::Address*>(that_ptr);
internal::Address* p = reinterpret_cast<internal::Address*>(that);
return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
reinterpret_cast<internal::Isolate*>(isolate), *p)));
#endif
}
T* val_;
};
@ -344,13 +371,15 @@ using Handle = Local<T>;
template <class T>
class MaybeLocal {
public:
V8_INLINE MaybeLocal() : val_(nullptr) {}
V8_INLINE MaybeLocal() : val_(internal::ValueHelper::EmptyValue<T>()) {}
template <class S>
V8_INLINE MaybeLocal(Local<S> that) : val_(reinterpret_cast<T*>(*that)) {
static_assert(std::is_base_of<T, S>::value, "type check");
}
V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
V8_INLINE bool IsEmpty() const {
return val_ == internal::ValueHelper::EmptyValue<T>();
}
/**
* Converts this MaybeLocal<> to a Local<>. If this MaybeLocal<> is empty,
@ -358,7 +387,7 @@ class MaybeLocal {
*/
template <class S>
V8_WARN_UNUSED_RESULT V8_INLINE bool ToLocal(Local<S>* out) const {
out->val_ = IsEmpty() ? nullptr : this->val_;
out->val_ = IsEmpty() ? internal::ValueHelper::EmptyValue<T>() : this->val_;
return !IsEmpty();
}
@ -367,7 +396,7 @@ class MaybeLocal {
* V8 will crash the process.
*/
V8_INLINE Local<T> ToLocalChecked() {
if (V8_UNLIKELY(val_ == nullptr)) api_internal::ToLocalEmpty();
if (V8_UNLIKELY(IsEmpty())) api_internal::ToLocalEmpty();
return Local<T>(val_);
}
@ -399,9 +428,13 @@ class V8_EXPORT V8_NODISCARD EscapableHandleScope : public HandleScope {
*/
template <class T>
V8_INLINE Local<T> Escape(Local<T> value) {
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
return value;
#else
internal::Address* slot =
Escape(reinterpret_cast<internal::Address*>(*value));
return Local<T>(reinterpret_cast<T*>(slot));
#endif
}
template <class T>

View file

@ -12,6 +12,7 @@
#include "v8-internal.h" // NOLINT(build/include_directory)
#include "v8-local-handle.h" // NOLINT(build/include_directory)
#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
@ -96,16 +97,42 @@ struct GarbageCollectionYoungCycle {
};
struct WasmModuleDecoded {
WasmModuleDecoded() = default;
WasmModuleDecoded(bool async, bool streamed, bool success,
size_t module_size_in_bytes, size_t function_count,
int64_t wall_clock_duration_in_us)
: async(async),
streamed(streamed),
success(success),
module_size_in_bytes(module_size_in_bytes),
function_count(function_count),
wall_clock_duration_in_us(wall_clock_duration_in_us) {}
bool async = false;
bool streamed = false;
bool success = false;
size_t module_size_in_bytes = 0;
size_t function_count = 0;
int64_t wall_clock_duration_in_us = -1;
int64_t cpu_duration_in_us = -1;
};
struct WasmModuleCompiled {
WasmModuleCompiled() = default;
WasmModuleCompiled(bool async, bool streamed, bool cached, bool deserialized,
bool lazy, bool success, size_t code_size_in_bytes,
size_t liftoff_bailout_count,
int64_t wall_clock_duration_in_us)
: async(async),
streamed(streamed),
cached(cached),
deserialized(deserialized),
lazy(lazy),
success(success),
code_size_in_bytes(code_size_in_bytes),
liftoff_bailout_count(liftoff_bailout_count),
wall_clock_duration_in_us(wall_clock_duration_in_us) {}
bool async = false;
bool streamed = false;
bool cached = false;
@ -115,7 +142,6 @@ struct WasmModuleCompiled {
size_t code_size_in_bytes = 0;
size_t liftoff_bailout_count = 0;
int64_t wall_clock_duration_in_us = -1;
int64_t cpu_duration_in_us = -1;
};
struct WasmModuleInstantiated {

View file

@ -607,6 +607,19 @@ class V8_EXPORT Object : public Value {
return object.val_->GetCreationContext();
}
/**
* Gets the context in which the object was created (see GetCreationContext())
* and if it's available reads respective embedder field value.
* If the context can't be obtained nullptr is returned.
* Basically it's a shortcut for
* obj->GetCreationContext().GetAlignedPointerFromEmbedderData(index)
* which doesn't create a handle for Context object on the way and doesn't
* try to expand the embedder data attached to the context.
* In case the Local<Context> is already available because of other reasons,
* it's fine to keep using Context::GetAlignedPointerFromEmbedderData().
*/
void* GetAlignedPointerFromEmbedderDataInCreationContext(int index);
/**
* Checks whether a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
@ -707,7 +720,7 @@ Local<Value> Object::GetInternalField(int index) {
#ifndef V8_ENABLE_CHECKS
using A = internal::Address;
using I = internal::Internals;
A obj = *reinterpret_cast<A*>(this);
A obj = internal::ValueHelper::ValueAsAddress(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
// know where to find the internal fields and can return the value directly.
int instance_type = I::GetInstanceType(obj);
@ -717,12 +730,17 @@ Local<Value> Object::GetInternalField(int index) {
#ifdef V8_COMPRESS_POINTERS
// We read the full pointer value and then decompress it in order to avoid
// dealing with potential endiannes issues.
value = I::DecompressTaggedAnyField(obj, static_cast<uint32_t>(value));
value = I::DecompressTaggedField(obj, static_cast<uint32_t>(value));
#endif
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
return Local<Value>(reinterpret_cast<Value*>(value));
#else
internal::Isolate* isolate =
internal::IsolateFromNeverReadOnlySpaceObject(obj);
A* result = HandleScope::CreateHandle(isolate, value);
return Local<Value>(reinterpret_cast<Value*>(result));
#endif
}
#endif
return SlowGetInternalField(index);
@ -732,7 +750,7 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
#if !defined(V8_ENABLE_CHECKS)
using A = internal::Address;
using I = internal::Internals;
A obj = *reinterpret_cast<A*>(this);
A obj = internal::ValueHelper::ValueAsAddress(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
// know where to find the internal fields and can return the value directly.
auto instance_type = I::GetInstanceType(obj);

View file

@ -55,7 +55,7 @@ class Eternal {
V8_INLINE Local<T> Get(Isolate* isolate) const {
// The eternal handle will never go away, so as with the roots, we don't
// even need to open a handle.
return Local<T>(val_);
return Local<T>(internal::ValueHelper::SlotAsValue<T>(val_));
}
V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
@ -68,6 +68,10 @@ class Eternal {
}
private:
V8_INLINE internal::Address address() const {
return *reinterpret_cast<internal::Address*>(val_);
}
T* val_;
};
@ -122,20 +126,12 @@ class PersistentBase {
template <class S>
V8_INLINE bool operator==(const PersistentBase<S>& that) const {
internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
if (a == nullptr) return b == nullptr;
if (b == nullptr) return false;
return *a == *b;
return internal::HandleHelper::EqualHandles(*this, that);
}
template <class S>
V8_INLINE bool operator==(const Local<S>& that) const {
internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
if (a == nullptr) return b == nullptr;
if (b == nullptr) return false;
return *a == *b;
return internal::HandleHelper::EqualHandles(*this, that);
}
template <class S>
@ -221,8 +217,15 @@ class PersistentBase {
template <class F1, class F2>
friend class PersistentValueVector;
friend class Object;
friend class internal::HandleHelper;
explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
V8_INLINE T* operator*() const { return this->val_; }
V8_INLINE internal::Address address() const {
return *reinterpret_cast<internal::Address*>(val_);
}
V8_INLINE static T* New(Isolate* isolate, Local<T> that);
V8_INLINE static T* New(Isolate* isolate, T* that);
T* val_;
@ -252,7 +255,7 @@ class NonCopyablePersistentTraits {
* This will clone the contents of storage cell, but not any of the flags, etc.
*/
template <class T>
struct CopyablePersistentTraits {
struct V8_DEPRECATED("Use v8::Global instead") CopyablePersistentTraits {
using CopyablePersistent = Persistent<T, CopyablePersistentTraits<T>>;
static const bool kResetInDestructor = true;
template <class S, class M>
@ -282,11 +285,13 @@ class Persistent : public PersistentBase<T> {
* When the Local is non-empty, a new storage cell is created
* pointing to the same object, and no flags are set.
*/
template <class S>
V8_INLINE Persistent(Isolate* isolate, Local<S> that)
: PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
: PersistentBase<T>(PersistentBase<T>::New(isolate, that)) {
static_assert(std::is_base_of<T, S>::value, "type check");
}
/**
* Construct a Persistent from a Persistent.
* When the Persistent is non-empty, a new storage cell is created
@ -356,7 +361,6 @@ class Persistent : public PersistentBase<T> {
friend class ReturnValue;
explicit V8_INLINE Persistent(T* that) : PersistentBase<T>(that) {}
V8_INLINE T* operator*() const { return this->val_; }
template <class S, class M2>
V8_INLINE void Copy(const Persistent<S, M2>& that);
};
@ -381,7 +385,7 @@ class Global : public PersistentBase<T> {
*/
template <class S>
V8_INLINE Global(Isolate* isolate, Local<S> that)
: PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
: PersistentBase<T>(PersistentBase<T>::New(isolate, that)) {
static_assert(std::is_base_of<T, S>::value, "type check");
}
@ -425,7 +429,6 @@ class Global : public PersistentBase<T> {
private:
template <class F>
friend class ReturnValue;
V8_INLINE T* operator*() const { return this->val_; }
};
// UniquePersistent is an alias for Global for historical reason.
@ -442,6 +445,12 @@ class V8_EXPORT PersistentHandleVisitor {
uint16_t class_id) {}
};
template <class T>
T* PersistentBase<T>::New(Isolate* isolate, Local<T> that) {
return PersistentBase<T>::New(isolate,
internal::ValueHelper::ValueAsSlot(*that));
}
template <class T>
T* PersistentBase<T>::New(Isolate* isolate, T* that) {
if (that == nullptr) return nullptr;
@ -486,7 +495,7 @@ void PersistentBase<T>::Reset(Isolate* isolate, const Local<S>& other) {
static_assert(std::is_base_of<T, S>::value, "type check");
Reset();
if (other.IsEmpty()) return;
this->val_ = New(isolate, other.val_);
this->val_ = New(isolate, internal::ValueHelper::ValueAsSlot(*other));
}
/**

View file

@ -5,9 +5,11 @@
#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h> // For abort.
#include <memory>
#include <string>
@ -265,6 +267,38 @@ class JobTask {
virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;
};
/**
* A "blocking call" refers to any call that causes the calling thread to wait
* off-CPU. It includes but is not limited to calls that wait on synchronous
* file I/O operations: read or write a file from disk, interact with a pipe or
* a socket, rename or delete a file, enumerate files in a directory, etc.
* Acquiring a low contention lock is not considered a blocking call.
*/
/**
* BlockingType indicates the likelihood that a blocking call will actually
* block.
*/
enum class BlockingType {
// The call might block (e.g. file I/O that might hit in memory cache).
kMayBlock,
// The call will definitely block (e.g. cache already checked and now pinging
// server synchronously).
kWillBlock
};
/**
* This class is instantiated with CreateBlockingScope() in every scope where a
* blocking call is made and serves as a precise annotation of the scope that
* may/will block. May be implemented by an embedder to adjust the thread count.
* CPU usage should be minimal within that scope. ScopedBlockingCalls can be
* nested.
*/
class ScopedBlockingCall {
public:
virtual ~ScopedBlockingCall() = default;
};
/**
* The interface represents complex arguments to trace events.
*/
@ -285,6 +319,8 @@ class ConvertableToTraceFormat {
* V8 Tracing controller.
*
* Can be implemented by an embedder to record trace events from V8.
*
* Will become obsolete in Perfetto SDK build (v8_use_perfetto = true).
*/
class TracingController {
public:
@ -348,10 +384,16 @@ class TracingController {
virtual void OnTraceDisabled() = 0;
};
/** Adds tracing state change observer. */
/**
* Adds tracing state change observer.
* Does nothing in Perfetto SDK build (v8_use_perfetto = true).
*/
virtual void AddTraceStateObserver(TraceStateObserver*) {}
/** Removes tracing state change observer. */
/**
* Removes tracing state change observer.
* Does nothing in Perfetto SDK build (v8_use_perfetto = true).
*/
virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};
@ -534,7 +576,7 @@ static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1;
// to avoid pulling in large OS header files into this header file. Instead,
// the users of these routines are expected to include the respecitve OS
// headers in addition to this one.
#if V8_OS_MACOS
#if V8_OS_DARWIN
// Convert between a shared memory handle and a mach_port_t referencing a memory
// entry object.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry(
@ -945,11 +987,12 @@ class Platform {
virtual void OnCriticalMemoryPressure() {}
/**
* Gets the number of worker threads used by
* Call(BlockingTask)OnWorkerThread(). This can be used to estimate the number
* of tasks a work package should be split into. A return value of 0 means
* that there are no worker threads available. Note that a value of 0 won't
* prohibit V8 from posting tasks using |CallOnWorkerThread|.
* Gets the max number of worker threads that may be used to execute
* concurrent work scheduled for any single TaskPriority by
* Call(BlockingTask)OnWorkerThread() or PostJob(). This can be used to
* estimate the number of tasks a work package should be split into. A return
* value of 0 means that there are no worker threads available. Note that a
* value of 0 won't prohibit V8 from posting tasks using |CallOnWorkerThread|.
*/
virtual int NumberOfWorkerThreads() = 0;
@ -1064,6 +1107,14 @@ class Platform {
virtual std::unique_ptr<JobHandle> CreateJob(
TaskPriority priority, std::unique_ptr<JobTask> job_task) = 0;
/**
* Instantiates a ScopedBlockingCall to annotate a scope that may/will block.
*/
virtual std::unique_ptr<ScopedBlockingCall> CreateBlockingScope(
BlockingType blocking_type) {
return nullptr;
}
/**
* Monotonically increasing time in seconds from an arbitrary fixed point in
* the past. This function is expected to return at least
@ -1074,11 +1125,28 @@ class Platform {
virtual double MonotonicallyIncreasingTime() = 0;
/**
* Current wall-clock time in milliseconds since epoch.
* This function is expected to return at least millisecond-precision values.
* Current wall-clock time in milliseconds since epoch. Use
* CurrentClockTimeMillisHighResolution() when higher precision is
* required.
*/
virtual int64_t CurrentClockTimeMilliseconds() {
return floor(CurrentClockTimeMillis());
}
/**
* This function is deprecated and will be deleted. Use either
* CurrentClockTimeMilliseconds() or
* CurrentClockTimeMillisecondsHighResolution().
*/
virtual double CurrentClockTimeMillis() = 0;
/**
* Same as CurrentClockTimeMilliseconds(), but with more precision.
*/
virtual double CurrentClockTimeMillisecondsHighResolution() {
return CurrentClockTimeMillis();
}
typedef void (*StackTracePrinter)();
/**

View file

@ -493,8 +493,15 @@ class V8_EXPORT String : public Name {
/**
* Returns true if this string can be made external.
*/
V8_DEPRECATE_SOON("Use the version that takes an encoding as argument.")
bool CanMakeExternal() const;
/**
* Returns true if this string can be made external, given the encoding for
* the external string resource.
*/
bool CanMakeExternal(Encoding encoding) const;
/**
* Returns true if the strings values are equal. Same as JS ==/===.
*/
@ -776,14 +783,14 @@ Local<String> String::Empty(Isolate* isolate) {
using S = internal::Address;
using I = internal::Internals;
I::CheckInitialized(isolate);
S* slot = I::GetRoot(isolate, I::kEmptyStringRootIndex);
return Local<String>(reinterpret_cast<String*>(slot));
S* slot = I::GetRootSlot(isolate, I::kEmptyStringRootIndex);
return Local<String>::FromSlot(slot);
}
String::ExternalStringResource* String::GetExternalStringResource() const {
using A = internal::Address;
using I = internal::Internals;
A obj = *reinterpret_cast<const A*>(this);
A obj = internal::ValueHelper::ValueAsAddress(this);
ExternalStringResource* result;
if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
@ -804,7 +811,7 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
String::Encoding* encoding_out) const {
using A = internal::Address;
using I = internal::Internals;
A obj = *reinterpret_cast<const A*>(this);
A obj = internal::ValueHelper::ValueAsAddress(this);
int type = I::GetInstanceType(obj) & I::kStringRepresentationAndEncodingMask;
*encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
ExternalStringResourceBase* resource;
@ -829,32 +836,32 @@ V8_INLINE Local<Primitive> Undefined(Isolate* isolate) {
using S = internal::Address;
using I = internal::Internals;
I::CheckInitialized(isolate);
S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
S* slot = I::GetRootSlot(isolate, I::kUndefinedValueRootIndex);
return Local<Primitive>::FromSlot(slot);
}
V8_INLINE Local<Primitive> Null(Isolate* isolate) {
using S = internal::Address;
using I = internal::Internals;
I::CheckInitialized(isolate);
S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
S* slot = I::GetRootSlot(isolate, I::kNullValueRootIndex);
return Local<Primitive>::FromSlot(slot);
}
V8_INLINE Local<Boolean> True(Isolate* isolate) {
using S = internal::Address;
using I = internal::Internals;
I::CheckInitialized(isolate);
S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
S* slot = I::GetRootSlot(isolate, I::kTrueValueRootIndex);
return Local<Boolean>::FromSlot(slot);
}
V8_INLINE Local<Boolean> False(Isolate* isolate) {
using S = internal::Address;
using I = internal::Internals;
I::CheckInitialized(isolate);
S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
S* slot = I::GetRootSlot(isolate, I::kFalseValueRootIndex);
return Local<Boolean>::FromSlot(slot);
}
Local<Boolean> Boolean::New(Isolate* isolate, bool value) {

View file

@ -596,6 +596,7 @@ class V8_EXPORT HeapGraphNode {
kBigInt = 13, // BigInt.
kObjectShape = 14, // Internal data used for tracking the shapes (or
// "hidden classes") of JS objects.
kWasmObject = 15, // A WasmGC struct or array.
};
/** Returns node type (see HeapGraphNode::Type). */

View file

@ -11,6 +11,7 @@
#include <memory>
#include <vector>
#include "v8-callbacks.h" // NOLINT(build/include_directory)
#include "v8-data.h" // NOLINT(build/include_directory)
#include "v8-local-handle.h" // NOLINT(build/include_directory)
#include "v8-maybe.h" // NOLINT(build/include_directory)
@ -347,6 +348,12 @@ class V8_EXPORT Script {
* ScriptOrigin. This can be either a v8::String or v8::Undefined.
*/
Local<Value> GetResourceName();
/**
* If the script was compiled, returns the positions of lazy functions which
* were eventually compiled and executed.
*/
std::vector<int> GetProducedCompileHints() const;
};
enum class ScriptType { kClassic, kModule };
@ -407,6 +414,8 @@ class V8_EXPORT ScriptCompiler {
V8_INLINE explicit Source(
Local<String> source_string, CachedData* cached_data = nullptr,
ConsumeCodeCacheTask* consume_cache_task = nullptr);
V8_INLINE Source(Local<String> source_string, const ScriptOrigin& origin,
CompileHintCallback callback, void* callback_data);
V8_INLINE ~Source() = default;
// Ownership of the CachedData or its buffers is *not* transferred to the
@ -434,6 +443,10 @@ class V8_EXPORT ScriptCompiler {
// set when calling a compile method.
std::unique_ptr<CachedData> cached_data;
std::unique_ptr<ConsumeCodeCacheTask> consume_cache_task;
// For requesting compile hints from the embedder.
CompileHintCallback compile_hint_callback = nullptr;
void* compile_hint_callback_data = nullptr;
};
/**
@ -562,7 +575,9 @@ class V8_EXPORT ScriptCompiler {
enum CompileOptions {
kNoCompileOptions = 0,
kConsumeCodeCache,
kEagerCompile
kEagerCompile,
kProduceCompileHints,
kConsumeCompileHints
};
/**
@ -775,6 +790,19 @@ ScriptCompiler::Source::Source(Local<String> string, CachedData* data,
cached_data(data),
consume_cache_task(consume_cache_task) {}
ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
CompileHintCallback callback,
void* callback_data)
: source_string(string),
resource_name(origin.ResourceName()),
resource_line_offset(origin.LineOffset()),
resource_column_offset(origin.ColumnOffset()),
resource_options(origin.Options()),
source_map_url(origin.SourceMapUrl()),
host_defined_options(origin.GetHostDefinedOptions()),
compile_hint_callback(callback),
compile_hint_callback_data(callback_data) {}
const ScriptCompiler::CachedData* ScriptCompiler::Source::GetCachedData()
const {
return cached_data.get();

View file

@ -91,7 +91,7 @@ class V8_EXPORT SnapshotCreator {
*/
SnapshotCreator(Isolate* isolate,
const intptr_t* external_references = nullptr,
StartupData* existing_blob = nullptr);
const StartupData* existing_blob = nullptr);
/**
* Create and enter an isolate, and set it up for serialization.
@ -102,7 +102,7 @@ class V8_EXPORT SnapshotCreator {
* that must be equivalent to CreateParams::external_references.
*/
SnapshotCreator(const intptr_t* external_references = nullptr,
StartupData* existing_blob = nullptr);
const StartupData* existing_blob = nullptr);
/**
* Destroy the snapshot creator, and exit and dispose of the Isolate
@ -179,16 +179,12 @@ class V8_EXPORT SnapshotCreator {
template <class T>
size_t SnapshotCreator::AddData(Local<Context> context, Local<T> object) {
T* object_ptr = *object;
internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
return AddData(context, *p);
return AddData(context, internal::ValueHelper::ValueAsAddress(*object));
}
template <class T>
size_t SnapshotCreator::AddData(Local<T> object) {
T* object_ptr = *object;
internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
return AddData(*p);
return AddData(internal::ValueHelper::ValueAsAddress(*object));
}
} // namespace v8

View file

@ -30,7 +30,9 @@ class Signature;
F(AsyncIteratorPrototype, initial_async_iterator_prototype) \
F(ErrorPrototype, initial_error_prototype) \
F(IteratorPrototype, initial_iterator_prototype) \
F(ObjProto_valueOf, object_value_of_function)
F(MapIteratorPrototype, initial_map_iterator_prototype) \
F(ObjProto_valueOf, object_value_of_function) \
F(SetIteratorPrototype, initial_set_iterator_prototype)
enum Intrinsic {
#define V8_DECL_INTRINSIC(name, iname) k##name,

View file

@ -62,7 +62,8 @@ class TracedReferenceBase {
*/
V8_INLINE v8::Local<v8::Value> Get(v8::Isolate* isolate) const {
if (IsEmpty()) return Local<Value>();
return Local<Value>::New(isolate, reinterpret_cast<Value*>(val_));
return Local<Value>::New(isolate,
internal::ValueHelper::SlotAsValue<Value>(val_));
}
/**
@ -103,10 +104,13 @@ class TracedReferenceBase {
V8_EXPORT void CheckValue() const;
V8_INLINE internal::Address address() const { return *val_; }
// val_ points to a GlobalHandles node.
internal::Address* val_ = nullptr;
friend class internal::BasicTracedReferenceExtractor;
friend class internal::HandleHelper;
template <typename F>
friend class Local;
template <typename U>
@ -117,11 +121,11 @@ class TracedReferenceBase {
/**
* A traced handle with copy and move semantics. The handle is to be used
* together with |v8::EmbedderHeapTracer| or as part of GarbageCollected objects
* (see v8-cppgc.h) and specifies edges from C++ objects to JavaScript.
* together as part of GarbageCollected objects (see v8-cppgc.h) or from stack
* and specifies edges from C++ objects to JavaScript.
*
* The exact semantics are:
* - Tracing garbage collections use |v8::EmbedderHeapTracer| or cppgc.
* - Tracing garbage collections using CppHeap.
* - Non-tracing garbage collections refer to
* |v8::EmbedderRootsHandler::IsRoot()| whether the handle should
* be treated as root or not.
@ -135,7 +139,12 @@ class BasicTracedReference : public TracedReferenceBase {
/**
* Construct a Local<T> from this handle.
*/
Local<T> Get(Isolate* isolate) const { return Local<T>::New(isolate, *this); }
Local<T> Get(Isolate* isolate) const {
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
if (val_ == nullptr) return Local<T>();
#endif
return Local<T>::New(isolate, *this);
}
template <class S>
V8_INLINE BasicTracedReference<S>& As() const {
@ -166,7 +175,6 @@ class BasicTracedReference : public TracedReferenceBase {
Isolate* isolate, T* that, void* slot,
internal::GlobalHandleStoreMode store_mode);
friend class EmbedderHeapTracer;
template <typename F>
friend class Local;
friend class Object;
@ -181,13 +189,7 @@ class BasicTracedReference : public TracedReferenceBase {
/**
* A traced handle without destructor that clears the handle. The embedder needs
* to ensure that the handle is not accessed once the V8 object has been
* reclaimed. This can happen when the handle is not passed through the
* EmbedderHeapTracer. For more details see BasicTracedReference.
*
* The reference assumes the embedder has precise knowledge about references at
* all times. In case V8 needs to separately handle on-stack references, the
* embedder is required to set the stack start through
* |EmbedderHeapTracer::SetStackStart|.
* reclaimed. For more details see BasicTracedReference.
*/
template <typename T>
class TracedReference : public BasicTracedReference<T> {
@ -207,7 +209,7 @@ class TracedReference : public BasicTracedReference<T> {
*/
template <class S>
TracedReference(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
this->val_ = this->New(isolate, that.val_, &this->val_,
this->val_ = this->New(isolate, *that, &this->val_,
internal::GlobalHandleStoreMode::kInitializingStore);
static_assert(std::is_base_of<T, S>::value, "type check");
}
@ -291,7 +293,7 @@ template <class T>
internal::Address* BasicTracedReference<T>::New(
Isolate* isolate, T* that, void* slot,
internal::GlobalHandleStoreMode store_mode) {
if (that == nullptr) return nullptr;
if (that == internal::ValueHelper::EmptyValue<T>()) return nullptr;
internal::Address* p = reinterpret_cast<internal::Address*>(that);
return internal::GlobalizeTracedReference(
reinterpret_cast<internal::Isolate*>(isolate), p,
@ -306,21 +308,13 @@ void TracedReferenceBase::Reset() {
V8_INLINE bool operator==(const TracedReferenceBase& lhs,
const TracedReferenceBase& rhs) {
v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(rhs.val_);
if (a == nullptr) return b == nullptr;
if (b == nullptr) return false;
return *a == *b;
return internal::HandleHelper::EqualHandles(lhs, rhs);
}
template <typename U>
V8_INLINE bool operator==(const TracedReferenceBase& lhs,
const v8::Local<U>& rhs) {
v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(*rhs);
if (a == nullptr) return b == nullptr;
if (b == nullptr) return false;
return *a == *b;
return internal::HandleHelper::EqualHandles(lhs, rhs);
}
template <typename U>
@ -353,7 +347,7 @@ void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
this->Reset();
if (other.IsEmpty()) return;
this->SetSlotThreadSafe(
this->New(isolate, other.val_, &this->val_,
this->New(isolate, *other, &this->val_,
internal::GlobalHandleStoreMode::kAssigningStore));
}

View file

@ -181,7 +181,11 @@ class PersistentValueMapBase {
* Get value stored in map.
*/
Local<V> Get(const K& key) {
return Local<V>::New(isolate_, FromVal(Traits::Get(&impl_, key)));
V* p = FromVal(Traits::Get(&impl_, key));
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
if (p == nullptr) return Local<V>();
#endif
return Local<V>::New(isolate_, p);
}
/**
@ -236,7 +240,8 @@ class PersistentValueMapBase {
: value_(other.value_) { }
Local<V> NewLocal(Isolate* isolate) const {
return Local<V>::New(isolate, FromVal(value_));
return Local<V>::New(
isolate, internal::ValueHelper::SlotAsValue<V>(FromVal(value_)));
}
bool IsEmpty() const {
return value_ == kPersistentContainerNotFound;
@ -613,7 +618,8 @@ class V8_DEPRECATE_SOON("Use std::vector<Global<V>>.") PersistentValueVector {
* Retrieve the i-th value in the vector.
*/
Local<V> Get(size_t index) const {
return Local<V>::New(isolate_, FromVal(Traits::Get(&impl_, index)));
return Local<V>::New(isolate_, internal::ValueHelper::SlotAsValue<V>(
FromVal(Traits::Get(&impl_, index))));
}
/**

View file

@ -344,6 +344,11 @@ class V8_EXPORT Value : public Data {
*/
bool IsWasmModuleObject() const;
/**
* Returns true if this value is the WasmNull object.
*/
bool IsWasmNull() const;
/**
* Returns true if the value is a Module Namespace Object.
*/
@ -469,10 +474,14 @@ bool Value::IsUndefined() const {
bool Value::QuickIsUndefined() const {
using A = internal::Address;
using I = internal::Internals;
A obj = *reinterpret_cast<const A*>(this);
A obj = internal::ValueHelper::ValueAsAddress(this);
#if V8_STATIC_ROOTS_BOOL
return I::is_identical(obj, I::StaticReadOnlyRoot::kUndefinedValue);
#else
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
#endif // V8_STATIC_ROOTS_BOOL
}
bool Value::IsNull() const {
@ -486,10 +495,14 @@ bool Value::IsNull() const {
bool Value::QuickIsNull() const {
using A = internal::Address;
using I = internal::Internals;
A obj = *reinterpret_cast<const A*>(this);
A obj = internal::ValueHelper::ValueAsAddress(this);
#if V8_STATIC_ROOTS_BOOL
return I::is_identical(obj, I::StaticReadOnlyRoot::kNullValue);
#else
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kNullOddballKind);
#endif // V8_STATIC_ROOTS_BOOL
}
bool Value::IsNullOrUndefined() const {
@ -501,13 +514,17 @@ bool Value::IsNullOrUndefined() const {
}
bool Value::QuickIsNullOrUndefined() const {
#if V8_STATIC_ROOTS_BOOL
return QuickIsNull() || QuickIsUndefined();
#else
using A = internal::Address;
using I = internal::Internals;
A obj = *reinterpret_cast<const A*>(this);
A obj = internal::ValueHelper::ValueAsAddress(this);
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
int kind = I::GetOddballKind(obj);
return kind == I::kNullOddballKind || kind == I::kUndefinedOddballKind;
#endif // V8_STATIC_ROOTS_BOOL
}
bool Value::IsString() const {
@ -521,9 +538,14 @@ bool Value::IsString() const {
bool Value::QuickIsString() const {
using A = internal::Address;
using I = internal::Internals;
A obj = *reinterpret_cast<const A*>(this);
A obj = internal::ValueHelper::ValueAsAddress(this);
if (!I::HasHeapObjectTag(obj)) return false;
#if V8_STATIC_ROOTS_BOOL && !V8_MAP_PACKING
return I::CheckInstanceMapRange(obj, I::StaticReadOnlyRoot::kFirstStringMap,
I::StaticReadOnlyRoot::kLastStringMap);
#else
return (I::GetInstanceType(obj) < I::kFirstNonstringType);
#endif // V8_STATIC_ROOTS_BOOL
}
} // namespace v8

View file

@ -8,10 +8,10 @@
// These macros define the version number for the current version.
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 10
#define V8_MINOR_VERSION 9
#define V8_BUILD_NUMBER 194
#define V8_PATCH_LEVEL 9
#define V8_MAJOR_VERSION 11
#define V8_MINOR_VERSION 3
#define V8_BUILD_NUMBER 244
#define V8_PATCH_LEVEL 4
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

View file

@ -144,7 +144,7 @@ class V8_EXPORT WasmStreaming final {
/**
* {Finish} should be called after all received bytes where passed to
* {OnBytesReceived} to tell V8 that there will be no more bytes. {Finish}
* does not have to be called after {Abort} has been called already.
* must not be called after {Abort} has been called already.
* If {can_use_compiled_module} is true and {SetCompiledModuleBytes} was
* previously called, the compiled module bytes can be used.
* If {can_use_compiled_module} is false, the compiled module bytes previously
@ -156,6 +156,7 @@ class V8_EXPORT WasmStreaming final {
* Abort streaming compilation. If {exception} has a value, then the promise
* associated with streaming compilation is rejected with that value. If
* {exception} does not have value, the promise does not get rejected.
* {Abort} must not be called repeatedly, or after {Finish}.
*/
void Abort(MaybeLocal<Value> exception);

View file

@ -173,6 +173,7 @@ path. Add it with -I<path> to the command line
// V8_TARGET_OS_LINUX
// V8_TARGET_OS_MACOS
// V8_TARGET_OS_WIN
// V8_TARGET_OS_CHROMEOS
//
// If not set explicitly, these fall back to corresponding V8_OS_ values.
@ -184,7 +185,8 @@ path. Add it with -I<path> to the command line
&& !defined(V8_TARGET_OS_IOS) \
&& !defined(V8_TARGET_OS_LINUX) \
&& !defined(V8_TARGET_OS_MACOS) \
&& !defined(V8_TARGET_OS_WIN)
&& !defined(V8_TARGET_OS_WIN) \
&& !defined(V8_TARGET_OS_CHROMEOS)
# error No known target OS defined.
# endif
@ -195,7 +197,8 @@ path. Add it with -I<path> to the command line
|| defined(V8_TARGET_OS_IOS) \
|| defined(V8_TARGET_OS_LINUX) \
|| defined(V8_TARGET_OS_MACOS) \
|| defined(V8_TARGET_OS_WIN)
|| defined(V8_TARGET_OS_WIN) \
|| defined(V8_TARGET_OS_CHROMEOS)
# error A target OS is defined but V8_HAVE_TARGET_OS is unset.
# endif
@ -308,6 +311,9 @@ path. Add it with -I<path> to the command line
// V8_HAS_BUILTIN_EXPECT - __builtin_expect() supported
// V8_HAS_BUILTIN_FRAME_ADDRESS - __builtin_frame_address() supported
// V8_HAS_BUILTIN_POPCOUNT - __builtin_popcount() supported
// V8_HAS_BUILTIN_ADD_OVERFLOW - __builtin_add_overflow() supported
// V8_HAS_BUILTIN_SUB_OVERFLOW - __builtin_sub_overflow() supported
// V8_HAS_BUILTIN_MUL_OVERFLOW - __builtin_mul_overflow() supported
// V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported
// V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported
// V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported
@ -339,9 +345,25 @@ path. Add it with -I<path> to the command line
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_CONSTINIT \
(__has_attribute(require_constant_initialization))
# define V8_HAS_ATTRIBUTE_CONST (__has_attribute(const))
# define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
// Support for the "preserve_most" attribute is limited:
// - 32-bit platforms do not implement it,
// - component builds fail because _dl_runtime_resolve clobbers registers,
// - we see crashes on arm64 on Windows (https://crbug.com/1409934), which can
// hopefully be fixed in the future.
// Additionally, the initial implementation in clang <= 16 overwrote the return
// register(s) in the epilogue of a preserve_most function, so we only use
// preserve_most in clang >= 17 (see https://reviews.llvm.org/D143425).
#if (defined(_M_X64) || defined(__x86_64__) /* x64 (everywhere) */ \
|| ((defined(__AARCH64EL__) || defined(_M_ARM64)) /* arm64, but ... */ \
&& !defined(_WIN32))) /* not on windows */ \
&& !defined(COMPONENT_BUILD) /* no component build */\
&& __clang_major__ >= 17 /* clang >= 17 */
# define V8_HAS_ATTRIBUTE_PRESERVE_MOST (__has_attribute(preserve_most))
#endif
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(__has_attribute(warn_unused_result))
@ -360,6 +382,9 @@ path. Add it with -I<path> to the command line
# define V8_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
# define V8_HAS_BUILTIN_FRAME_ADDRESS (__has_builtin(__builtin_frame_address))
# define V8_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
# define V8_HAS_BUILTIN_ADD_OVERFLOW (__has_builtin(__builtin_add_overflow))
# define V8_HAS_BUILTIN_SUB_OVERFLOW (__has_builtin(__builtin_sub_overflow))
# define V8_HAS_BUILTIN_MUL_OVERFLOW (__has_builtin(__builtin_mul_overflow))
# define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
# define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow))
@ -455,6 +480,16 @@ path. Add it with -I<path> to the command line
#endif
// A macro to mark functions whose values don't change (e.g. across calls)
// and thereby compiler is free to hoist and fold multiple calls together.
// Use like:
// V8_CONST int foo() { ... }
#if V8_HAS_ATTRIBUTE_CONST
# define V8_CONST __attribute__((const))
#else
# define V8_CONST
#endif
// A macro to mark a declaration as requiring constant initialization.
// Use like:
// int* foo V8_CONSTINIT;
@ -487,6 +522,21 @@ path. Add it with -I<path> to the command line
#endif
// A macro used to change the calling conventions to preserve all registers (no
// caller-saved registers). Use this for cold functions called from hot
// functions.
// Note: The attribute is considered experimental, so apply with care. Also,
// "preserve_most" is currently not handling the return value correctly, so only
// use it for functions returning void (see https://reviews.llvm.org/D141020).
// Use like:
// V8_NOINLINE V8_PRESERVE_MOST void UnlikelyMethod();
#if V8_HAS_ATTRIBUTE_PRESERVE_MOST
# define V8_PRESERVE_MOST __attribute__((preserve_most))
#else
# define V8_PRESERVE_MOST /* NOT SUPPORTED */
#endif
// A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
#if defined(V8_DEPRECATION_WARNINGS)
# define V8_DEPRECATED(message) [[deprecated(message)]]
@ -884,4 +934,10 @@ V8 shared library set USING_V8_SHARED.
#undef V8_HAS_CPP_ATTRIBUTE
#if !defined(V8_STATIC_ROOTS)
#define V8_STATIC_ROOTS_BOOL false
#else
#define V8_STATIC_ROOTS_BOOL true
#endif
#endif // V8CONFIG_H_

View file

@ -31,6 +31,10 @@
"label": "//test:v8_d8_default",
"type": "script",
},
"d8_pgo": {
"label": "//test:d8_pgo",
"type": "script",
},
"generate-bytecode-expectations": {
"label": "//test/unittests:generate-bytecode-expectations",
"type": "script",
@ -72,7 +76,7 @@
"type": "script",
},
"unittests": {
"label": "//test/unittests:unittests",
"label": "//test/unittests:v8_unittests",
"type": "script",
},
"fuchsia-unittests": {

View file

@ -52,6 +52,10 @@
'linux-v8-dr': 'release_x64',
},
'client.v8': {
# PGO
'V8 Linux PGO instrumentation - builder' : 'builtins_profiling_x86',
'V8 Linux64 PGO instrumentation - builder' : 'builtins_profiling_x64',
# Linux.
'V8 Linux - builder': 'release_x86_gcmole',
'V8 Linux - debug builder': 'debug_x86',
@ -59,14 +63,13 @@
'V8 Linux - noi18n - debug builder': 'debug_x86_no_i18n',
'V8 Linux - verify csa - builder': 'release_x86_verify_csa',
# Linux64.
'V8 Linux64 - builder': 'release_x64',
'V8 Linux64 - builder': 'release_x64_gcmole',
'V8 Linux64 - builder (goma cache silo)': 'release_x64',
'V8 Linux64 - builder (reclient)': 'release_x64_reclient',
'V8 Linux64 - builder (reclient compare)': 'release_x64_reclient',
'V8 Linux64 - debug builder': 'debug_x64',
'V8 Linux64 - external code space - debug - builder': 'debug_x64_external_code_space',
'V8 Linux64 - custom snapshot - debug builder': 'debug_x64_custom',
'V8 Linux64 - heap sandbox - debug - builder': 'debug_x64_heap_sandbox',
'V8 Linux64 - internal snapshot - builder': 'release_x64_internal',
'V8 Linux64 - debug - header includes - builder': 'debug_x64_header_includes',
'V8 Linux64 - no sandbox - debug builder': 'debug_x64_no_sandbox',
@ -80,6 +83,7 @@
'V8 Win32 - builder (reclient)': 'release_x86_minimal_symbols_reclient',
'V8 Win32 - builder (reclient compare)': 'release_x86_minimal_symbols_reclient',
'V8 Win32 - debug builder': 'debug_x86_minimal_symbols',
'V8 Win32 - msvc - debug builder': 'debug_x86_msvc',
# TODO(machenbach): Remove after switching to x64 on infra side.
'V8 Win64 ASAN - builder': 'release_x64_asan_no_lsan',
'V8 Win64 - builder': 'release_x64_minimal_symbols',
@ -103,14 +107,16 @@
# Sanitizers.
'V8 Linux64 ASAN - builder': 'release_x64_asan',
'V8 Linux64 TSAN - builder': 'release_x64_tsan',
'V8 Linux64 TSAN - debug builder': 'debug_x64_tsan_minimal_symbols',
'V8 Linux64 TSAN - no-concurrent-marking - builder': 'release_x64_tsan_no_cm',
'V8 Linux - arm64 - sim - CFI - builder': 'release_simulate_arm64_cfi',
'V8 Linux - arm64 - sim - MSAN - builder': 'release_simulate_arm64_msan',
# FYI.
'V8 iOS - sim - builder': 'release_x64_ios_simulator',
'V8 Linux64 - arm64 - sim - heap sandbox - debug - builder': 'debug_x64_heap_sandbox_arm64_sim',
'V8 Linux64 - arm64 - sim - no pointer compression - builder':
'release_simulate_arm64_no_pointer_compression',
'V8 Linux64 - coverage': 'release_x64_coverage',
'V8 Linux64 - coverage - debug': 'debug_x64_coverage',
'V8 Linux64 - cppgc-non-default - debug - builder': 'debug_x64_non_default_cppgc',
'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto',
'V8 Linux64 - disable runtime call stats - builder': 'release_x64_disable_runtime_call_stats',
@ -125,9 +131,9 @@
'V8 Linux64 - cfi - builder': 'release_x64_cfi',
'V8 Linux64 UBSan - builder': 'release_x64_ubsan',
'V8 Linux - vtunejit': 'debug_x86_vtunejit',
'V8 Linux64 - gcov coverage': 'release_x64_gcc_coverage',
'V8 Linux64 - Fuzzilli - builder': 'release_x64_fuzzilli',
'V8 Linux64 - predictable - builder': 'release_x64_predictable',
'V8 Linux64 - verify deterministic': 'release_x64_verify_deterministic',
'V8 Linux - full debug builder': 'full_debug_x86',
'V8 Mac64 - full debug builder': 'full_debug_x64',
'V8 Random Deopt Fuzzer - debug': 'debug_x64',
@ -168,22 +174,28 @@
'V8 Clusterfuzz Linux64 UBSan - release builder':
'release_x64_ubsan_recover',
'V8 Clusterfuzz Linux64 ASAN sandbox testing - release builder':
'release_x64_asan_sandbox_testing',
'release_x64_asan_symbolized_expose_memory_corruption',
},
'client.v8.perf' : {
'V8 Arm - builder - perf': 'official_arm',
'V8 Arm - builder - pgo - perf': 'official_arm_pgo',
'V8 Android Arm - builder - perf': 'official_android_arm',
'V8 Android Arm - builder - pgo - perf': 'official_android_arm_pgo',
'V8 Android Arm64 - builder - perf': 'official_android_arm64',
'V8 Android Arm64 - builder - pgo - perf': 'official_android_arm64_pgo',
'V8 Linux - builder - perf': 'official_x86',
'V8 Linux - builder - pgo - perf': 'official_x86_pgo',
'V8 Linux64 - builder - perf': 'official_x64',
'V8 Linux64 - builder - pgo - perf': 'official_x64_pgo',
'V8 Mac Arm64 - builder - perf': 'official_mac_arm64',
'V8 Mac Arm64 - builder - pgo - perf': 'official_mac_arm64_pgo',
},
'client.v8.ports': {
# Arm.
'V8 Arm - builder': 'release_arm',
'V8 Arm - debug builder': 'debug_arm',
'V8 Android Arm - builder': 'release_android_arm',
'V8 Linux - arm - sim - builder': 'release_simulate_arm',
'V8 Linux - arm - sim - builder': 'release_simulate_arm_gcmole',
'V8 Linux - arm - sim - debug builder': 'debug_simulate_arm',
'V8 Linux - arm - sim - lite - builder': 'release_simulate_arm_lite',
'V8 Linux - arm - sim - lite - debug builder': 'debug_simulate_arm_lite',
@ -191,7 +203,7 @@
'V8 Android Arm64 - builder': 'release_android_arm64',
'V8 Android Arm64 - debug builder': 'debug_android_arm64',
'V8 Arm64 - builder': 'release_arm64_hard_float',
'V8 Linux - arm64 - sim - builder': 'release_simulate_arm64',
'V8 Linux - arm64 - sim - builder': 'release_simulate_arm64_gcmole',
'V8 Linux - arm64 - sim - debug builder': 'debug_simulate_arm64',
'V8 Linux - arm64 - sim - gc stress - builder': 'debug_simulate_arm64',
# Mips.
@ -209,6 +221,7 @@
'v8_android_arm_compile_rel': 'release_android_arm',
'v8_android_arm64_compile_dbg': 'debug_android_arm64',
'v8_android_arm64_n5x_compile_rel': 'release_android_arm64',
'v8_android_arm64_d8_compile_rel': 'release_android_arm64',
'v8_fuchsia_compile_rel': 'release_x64_fuchsia_trybot',
'v8_ios_simulator': 'release_x64_ios_simulator',
'v8_linux_compile_rel': 'release_x86_gcmole_trybot',
@ -225,6 +238,8 @@
'release_simulate_arm64_no_pointer_compression',
'v8_linux64_cppgc_non_default_compile_dbg': 'debug_x64_non_default_cppgc',
'v8_linux64_compile_dbg': 'debug_x64_trybot',
'v8_linux64_coverage_dbg': 'debug_x64_coverage',
'v8_linux64_coverage_rel': 'release_x64_coverage',
'v8_linux64_no_sandbox_compile_dbg': 'debug_x64_no_sandbox',
'v8_linux64_dict_tracking_compile_dbg': 'debug_x64_dict_tracking_trybot',
'v8_linux64_disable_runtime_call_stats_compile_rel': 'release_x64_disable_runtime_call_stats',
@ -235,16 +250,13 @@
'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
'v8_linux64_gcc_light_compile_dbg': 'debug_x64_gcc',
'v8_linux64_gcc_compile_rel': 'release_x64_gcc',
'v8_linux64_gcov_coverage': 'release_x64_gcc_coverage',
'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
'v8_linux64_heap_sandbox_compile_dbg': 'debug_x64_heap_sandbox',
'v8_linux64_minor_mc_compile_dbg': 'debug_x64_trybot',
'v8_linux_arm64_sim_heap_sandbox_compile_dbg': 'debug_x64_heap_sandbox_arm64_sim',
'v8_linux64_fyi_compile_rel': 'release_x64_test_features_trybot',
'v8_linux64_nodcheck_compile_rel': 'release_x64',
'v8_linux64_perfetto_compile_dbg': 'debug_x64_perfetto',
'v8_linux64_no_pointer_compression_compile_rel': 'release_x64_no_pointer_compression',
'v8_linux64_compile_rel': 'release_x64_test_features_trybot',
'v8_linux64_compile_rel': 'release_x64_test_features_gcmole_trybot',
'v8_linux64_no_sandbox_compile_rel': 'release_x64_no_sandbox',
'v8_linux64_predictable_compile_rel': 'release_x64_predictable',
'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap',
@ -259,15 +271,18 @@
'v8_linux_riscv32_compile_rel': 'release_simulate_riscv32',
'v8_linux64_riscv64_compile_rel': 'release_simulate_riscv64',
'v8_linux64_tsan_compile_rel': 'release_x64_tsan_minimal_symbols',
'v8_linux64_tsan_compile_dbg': 'debug_x64_tsan_minimal_symbols',
'v8_linux64_tsan_no_cm_compile_rel': 'release_x64_tsan_no_cm',
'v8_linux64_tsan_isolates_compile_rel':
'release_x64_tsan_minimal_symbols',
'v8_linux64_ubsan_compile_rel': 'release_x64_ubsan_minimal_symbols',
'v8_linux64_verify_deterministic_rel': 'release_x64_verify_deterministic',
'v8_odroid_arm_compile_rel': 'release_arm',
'v8_linux_torque_compare': 'torque_compare',
# TODO(machenbach): Remove after switching to x64 on infra side.
'v8_win_compile_dbg': 'debug_x86_trybot',
'v8_win_compile_rel': 'release_x86_trybot',
'v8_win_msvc_light_compile_dbg': 'debug_x86_msvc',
'v8_win64_asan_compile_rel': 'release_x64_asan_no_lsan',
'v8_win64_msvc_light_compile_rel': 'release_x64_msvc',
'v8_win64_compile_dbg': 'debug_x64_minimal_symbols',
@ -284,15 +299,15 @@
'v8_mac64_compile_rel': 'release_x64_trybot',
'v8_mac64_dbg': 'debug_x64',
'v8_mac64_compile_dbg': 'debug_x64',
'v8_mac64_compile_full_compile_dbg': 'full_debug_x64',
'v8_mac64_noopt_compile_dbg': 'full_debug_x64',
'v8_mac64_asan_compile_rel': 'release_x64_asan_no_lsan',
'v8_linux_arm_compile_rel': 'release_simulate_arm_trybot',
'v8_linux_arm_compile_rel': 'release_simulate_arm_gcmole_trybot',
'v8_linux_arm_lite_compile_dbg': 'debug_simulate_arm_lite',
'v8_linux_arm_lite_compile_rel': 'release_simulate_arm_lite_trybot',
'v8_linux_arm_compile_dbg': 'debug_simulate_arm',
'v8_linux_arm_armv8a_rel': 'release_simulate_arm_trybot',
'v8_linux_arm_armv8a_dbg': 'debug_simulate_arm',
'v8_linux_arm64_compile_rel': 'release_simulate_arm64_trybot',
'v8_linux_arm64_compile_rel': 'release_simulate_arm64_gcmole_trybot',
'v8_linux_arm64_cfi_compile_rel' : 'release_simulate_arm64_cfi',
'v8_linux_arm64_compile_dbg': 'debug_simulate_arm64',
'v8_linux_arm64_gc_stress_compile_dbg': 'debug_simulate_arm64',
@ -399,20 +414,26 @@
'debug_bot', 'simulate_arm64', 'asan', 'lsan'],
# Release configs for simulators.
'release_simulate_arm': [
'release_bot', 'simulate_arm'],
'release_simulate_arm_gcmole': [
'release_bot', 'simulate_arm', 'gcmole'],
'release_simulate_arm_lite': [
'release_bot', 'simulate_arm', 'v8_enable_lite_mode'],
'release_simulate_arm_trybot': [
'release_trybot', 'simulate_arm'],
'release_simulate_arm_gcmole_trybot': [
'release_trybot', 'simulate_arm', 'gcmole'],
'release_simulate_arm_lite_trybot': [
'release_trybot', 'simulate_arm', 'v8_enable_lite_mode'],
'release_simulate_arm_trybot': [
'release_trybot', 'simulate_arm'],
'release_simulate_arm64': [
'release_bot', 'simulate_arm64'],
'release_simulate_arm64_cfi': [
'release_bot', 'simulate_arm64', 'v8_control_flow_integrity'],
'release_simulate_arm64_gcmole': [
'release_bot', 'simulate_arm64', 'gcmole'],
'release_simulate_arm64_gcmole_trybot': [
'release_trybot', 'simulate_arm64', 'gcmole'],
'release_simulate_arm64_no_pointer_compression': [
'release_bot', 'simulate_arm64_no_sandbox', 'dcheck_always_on',
'release_bot', 'simulate_arm64', 'no_sandbox', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_disable_pointer_compression'],
'release_simulate_arm64_msan': [
'release_bot', 'simulate_arm64', 'msan'],
@ -445,7 +466,7 @@
'debug_arm64': [
'debug_bot', 'arm64'],
'debug_arm64_no_pointer_compression': [
'debug_bot', 'arm64_no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_javascript_promise_hooks',
'debug_bot', 'arm64', 'no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_javascript_promise_hooks',
'v8_disable_pointer_compression'],
'full_debug_arm64': [
'debug_bot', 'arm64', 'v8_full_debug'],
@ -466,15 +487,29 @@
# Official configs for arm
'official_arm': [
'release_bot', 'arm', 'hard_float', 'official', 'disable_pgo'],
'release_bot', 'arm', 'hard_float', 'official', 'disable_chrome_pgo'],
'official_arm_pgo': [
'release_bot', 'arm', 'hard_float', 'official', 'disable_chrome_pgo',
'builtins_optimization'],
'official_android_arm': [
'release_bot', 'arm', 'android', 'minimal_symbols',
'android_strip_outputs', 'official', 'disable_pgo'],
'android_strip_outputs', 'official', 'disable_chrome_pgo'],
'official_android_arm_pgo': [
'release_bot', 'arm', 'android', 'minimal_symbols',
'android_strip_outputs', 'official', 'disable_chrome_pgo',
'builtins_optimization'],
'official_android_arm64': [
'release_bot', 'arm64', 'android', 'minimal_symbols',
'android_strip_outputs', 'official', 'disable_pgo'],
'android_strip_outputs', 'official', 'disable_chrome_pgo'],
'official_android_arm64_pgo': [
'release_bot', 'arm64', 'android', 'minimal_symbols',
'android_strip_outputs', 'official', 'disable_chrome_pgo',
'builtins_optimization'],
'official_mac_arm64': [
'release_bot', 'arm64', 'official', 'disable_pgo'],
'release_bot', 'arm64', 'official', 'disable_chrome_pgo'],
'official_mac_arm64_pgo': [
'release_bot', 'arm64', 'official', 'disable_chrome_pgo',
'builtins_optimization'],
# Release configs for x64.
'release_x64': [
@ -492,6 +527,9 @@
'release_x64_asan_no_lsan_verify_heap_dchecks': [
'release_bot', 'x64', 'asan', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_heap'],
'release_x64_asan_symbolized_expose_memory_corruption': [
'release_bot', 'x64', 'asan', 'symbolized',
'v8_expose_memory_corruption_api'],
'release_x64_asan_symbolized_verify_heap': [
'release_bot', 'x64', 'asan', 'lsan', 'symbolized',
'v8_verify_heap'],
@ -499,9 +537,13 @@
'release_bot', 'x64', 'cfi'],
'release_x64_cfi_clusterfuzz': [
'release_bot', 'x64', 'cfi_clusterfuzz'],
'release_x64_coverage': [
'release_bot', 'x64', 'clang_coverage'],
'release_x64_fuzzilli': [
'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks',
'v8_verify_heap', 'v8_verify_csa', 'fuzzilli'],
'release_x64_gcmole': [
'release_bot', 'x64', 'gcmole'],
'release_x64_msvc': [
'release_bot_no_goma', 'x64', 'minimal_symbols', 'msvc'],
'release_x64_correctness_fuzzer' : [
@ -514,9 +556,6 @@
'release_trybot', 'x64', 'fuchsia'],
'release_x64_gcc': [
'release_bot_no_goma', 'x64', 'gcc', 'lld', 'no_custom_libcxx'],
'release_x64_gcc_coverage': [
'release_bot_no_goma', 'x64', 'coverage', 'gcc', 'lld',
'no_custom_libcxx', 'no_sysroot'],
'release_x64_ios_simulator': [
'release_bot', 'x64', 'ios_simulator'],
'release_x64_internal': [
@ -526,14 +565,16 @@
'release_x64_minimal_symbols_reclient': [
'release_bot_reclient', 'x64', 'minimal_symbols'],
'release_x64_no_pointer_compression': [
'release_bot', 'x64_no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_javascript_promise_hooks',
'v8_disable_pointer_compression'],
'release_bot', 'x64', 'no_sandbox', 'dcheck_always_on', 'v8_enable_slow_dchecks',
'v8_enable_javascript_promise_hooks', 'v8_disable_pointer_compression'],
'release_x64_reclient': [
'release_bot_reclient', 'x64'],
'release_x64_no_sandbox': [
'release_bot', 'x64_no_sandbox'],
'release_bot', 'x64', 'no_sandbox'],
'release_x64_trybot': [
'release_trybot', 'x64'],
'release_x64_test_features_gcmole_trybot': [
'release_trybot', 'x64', 'v8_enable_test_features', 'gcmole'],
'release_x64_test_features_trybot': [
'release_trybot', 'x64', 'v8_enable_test_features'],
'release_x64_tsan': [
@ -553,15 +594,17 @@
'release_x64_verify_csa': [
'release_bot', 'x64', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
'release_x64_verify_deterministic': [
'release_bot', 'x64', 'v8_verify_deterministic'],
'release_x64_webassembly_disabled': [
'release_bot', 'x64', 'webassembly_disabled'],
'release_x64_asan_sandbox_testing': [
'release_bot', 'x64', 'asan', 'symbolized', 'v8_enable_sandbox_future',
'v8_expose_memory_corruption_api'],
# Official configs for x64.
'official_x64': [
'release_bot', 'x64', 'official', 'disable_pgo'],
'release_bot', 'x64', 'official', 'disable_chrome_pgo'],
'official_x64_pgo': [
'release_bot', 'x64', 'official', 'disable_chrome_pgo',
'builtins_optimization'],
# Debug configs for x64.
'debug_x64': [
@ -573,6 +616,8 @@
'x64', 'asan'],
'debug_x64_conservative_stack_scanning': [
'debug_bot', 'x64', 'conservative_stack_scanning'],
'debug_x64_coverage': [
'debug_bot', 'x64', 'clang_coverage'],
'debug_x64_custom': [
'debug_bot', 'x64', 'v8_snapshot_custom'],
'debug_x64_external_code_space': [
@ -583,10 +628,6 @@
'debug_bot_no_goma', 'x64', 'gcc', 'lld', 'no_custom_libcxx'],
'debug_x64_header_includes': [
'debug_bot', 'x64', 'v8_check_header_includes'],
'debug_x64_heap_sandbox': [
'debug_bot', 'x64', 'v8_enable_sandbox_future', 'v8_expose_memory_corruption_api'],
'debug_x64_heap_sandbox_arm64_sim': [
'debug_bot', 'simulate_arm64', 'v8_enable_sandbox_future', 'v8_expose_memory_corruption_api'],
'debug_x64_minimal_symbols': [
'debug_bot', 'x64', 'minimal_symbols'],
'debug_x64_non_default_cppgc': [
@ -594,7 +635,7 @@
'debug_x64_perfetto': [
'debug_bot', 'x64', 'perfetto'],
'debug_x64_no_sandbox': [
'debug_bot', 'x64_no_sandbox'],
'debug_bot', 'x64', 'no_sandbox'],
'debug_x64_single_generation': [
'debug_bot', 'x64', 'v8_enable_single_generation'],
'debug_x64_trybot': [
@ -603,6 +644,9 @@
'debug_trybot', 'x64', 'v8_enable_dict_property_const_tracking'],
'debug_x64_trybot_custom': [
'debug_trybot', 'x64', 'v8_snapshot_custom'],
'debug_x64_tsan_minimal_symbols': [
'debug_bot_no_slow_dchecks', 'minimal_symbols', 'x64', 'dcheck_always_on',
'tsan', 'v8_disable_verify_heap', 'v8_fast_mksnapshot'],
'full_debug_x64': [
'debug_bot', 'x64', 'v8_full_debug'],
@ -613,6 +657,8 @@
'debug_bot', 'x86', 'asan', 'lsan'],
'debug_x86_minimal_symbols': [
'debug_bot', 'x86', 'minimal_symbols'],
'debug_x86_msvc': [
'debug_bot_no_goma', 'x86', 'minimal_symbols', 'msvc'],
'debug_x86_no_i18n': [
'debug_bot', 'x86', 'v8_no_i18n'],
'debug_x86_trybot': [
@ -650,11 +696,18 @@
# Official configs for x86.
'official_x86': [
'release_bot', 'x86', 'official', 'disable_pgo'],
'release_bot', 'x86', 'official', 'disable_chrome_pgo'],
'official_x86_pgo': [
'release_bot', 'x86', 'official', 'disable_chrome_pgo',
'builtins_optimization'],
# Torque compare test
'torque_compare': [
'release_bot', 'verify_torque']
'release_bot', 'verify_torque'],
# PGO
'builtins_profiling_x86': ['builtins_profiling', 'x86'],
'builtins_profiling_x64': ['builtins_profiling', 'x64'],
},
'mixins': {
@ -671,11 +724,7 @@
},
'arm64': {
'gn_args': 'target_cpu="arm64" v8_enable_sandbox=true',
},
'arm64_no_sandbox': {
'gn_args': 'target_cpu="arm64" v8_enable_sandbox=false',
'gn_args': 'target_cpu="arm64"',
},
'asan': {
@ -683,6 +732,15 @@
'gn_args': 'is_asan=true',
},
'builtins_profiling': {
'mixins' : ['release_bot_reclient'],
'gn_args': 'v8_enable_builtins_profiling=true',
},
'builtins_optimization': {
'gn_args': 'v8_enable_builtins_optimization=true',
},
'cfi': {
'mixins': ['v8_enable_test_features'],
'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_icall=true '
@ -699,13 +757,12 @@
'gn_args': 'is_clang=true',
},
'conservative_stack_scanning': {
'gn_args': 'v8_enable_conservative_stack_scanning=true '
'v8_enable_inner_pointer_resolution_mb=true',
'clang_coverage': {
'gn_args': 'use_clang_coverage=true',
},
'coverage': {
'gn_args': 'v8_code_coverage=true',
'conservative_stack_scanning': {
'gn_args': 'v8_enable_conservative_stack_scanning=true',
},
'dcheck_always_on': {
@ -716,6 +773,12 @@
'gn_args': 'is_debug=true v8_enable_backtrace=true',
},
'debug_bot_no_slow_dchecks': {
'mixins': [
'debug', 'shared', 'goma', 'v8_disable_slow_dchecks',
'v8_optimized_debug', 'v8_enable_google_benchmark'],
},
'debug_bot': {
'mixins': [
'debug', 'shared', 'goma', 'v8_enable_slow_dchecks',
@ -741,7 +804,7 @@
'v8_enable_atomic_object_field_writes=false ',
},
'disable_pgo': {
'disable_chrome_pgo': {
'gn_args': 'chrome_pgo_phase=0',
},
@ -793,12 +856,12 @@
'msan': {
'mixins': ['v8_enable_test_features'],
'gn_args': 'is_msan=true msan_track_origins=2',
'gn_args': 'is_msan=true msan_track_origins=2 instrumented_libraries_release="focal"',
},
'msan_no_origins': {
'mixins': ['v8_enable_test_features'],
'gn_args': 'is_msan=true msan_track_origins=0',
'gn_args': 'is_msan=true msan_track_origins=0 instrumented_libraries_release="focal"',
},
'msvc': {
@ -813,8 +876,8 @@
'gn_args': 'use_goma=false',
},
'no_sysroot': {
'gn_args': 'use_sysroot=false',
'no_sandbox': {
'gn_args': 'v8_enable_sandbox=false',
},
'non_default_cppgc': {
@ -862,11 +925,7 @@
},
'simulate_arm64': {
'gn_args': 'target_cpu="x64" v8_target_cpu="arm64" v8_enable_sandbox=true',
},
'simulate_arm64_no_sandbox': {
'gn_args': 'target_cpu="x64" v8_target_cpu="arm64" v8_enable_sandbox=false',
'gn_args': 'target_cpu="x64" v8_target_cpu="arm64"',
},
'simulate_loong64': {
@ -938,8 +997,8 @@
'gn_args': 'v8_enable_runtime_call_stats=false',
},
'v8_enable_sandbox_future': {
'gn_args': 'v8_enable_sandbox_future=true',
'v8_disable_verify_heap': {
'gn_args': 'v8_enable_verify_heap=false',
},
'v8_expose_memory_corruption_api': {
@ -954,6 +1013,10 @@
'gn_args': 'v8_enable_slow_dchecks=true',
},
'v8_disable_slow_dchecks': {
'gn_args': 'v8_enable_slow_dchecks=false',
},
'v8_enable_javascript_promise_hooks': {
'gn_args': 'v8_enable_javascript_promise_hooks=true',
},
@ -989,6 +1052,10 @@
'gn_args': 'v8_enable_vtunejit=true v8_enable_vtunetracemark=true',
},
'v8_fast_mksnapshot': {
'gn_args': 'v8_enable_fast_mksnapshot=true',
},
'v8_full_debug': {
'gn_args': 'v8_optimized_debug=false',
},
@ -1018,6 +1085,10 @@
'gn_args': 'v8_enable_verify_csa=true',
},
'v8_verify_deterministic': {
'gn_args': 'v8_verify_deterministic_mksnapshot=true',
},
's390x': {
'gn_args': 'target_cpu="s390x" v8_target_cpu="s390x"',
},
@ -1031,11 +1102,7 @@
},
'x64': {
'gn_args': 'target_cpu="x64" v8_enable_sandbox=true',
},
'x64_no_sandbox': {
'gn_args': 'target_cpu="x64" v8_enable_sandbox=false',
'gn_args': 'target_cpu="x64"',
},
'x86': {

View file

@ -89,7 +89,8 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
'test_args': ['--extra-flags', '--noenable-avx']
'test_args': ['--extra-flags', '--noenable-avx'],
'shards': 2
},
{
'name': 'v8testing',
@ -128,8 +129,8 @@
{'name': 'benchmarks', 'variant': 'extra'},
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'test262', 'shards': 2},
{'name': 'test262', 'variant': 'extra', 'shards': 2},
{'name': 'test262', 'shards': 4},
{'name': 'test262', 'variant': 'extra', 'shards': 4},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
{
@ -149,7 +150,7 @@
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'variant': 'default', 'shards': 2},
],
},
@ -179,7 +180,7 @@
],
'shards': 4,
},
{'name': 'gcmole'},
{'name': 'gcmole_v3', 'variant': 'ia32', 'shards': 4},
],
},
'v8_linux_optional_rel': {
@ -210,6 +211,7 @@
'--extra-flags',
'--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx',
],
'shards': 2,
},
{
'name': 'v8testing',
@ -237,6 +239,7 @@
'--extra-flags',
'--noenable-ssse3 --noenable-sse4-1 --noenable-avx',
],
'shards': 2,
},
{
'name': 'v8testing',
@ -258,6 +261,7 @@
'suffix': 'nosse4',
'variant': 'default',
'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'],
'shards': 2,
},
{
'name': 'v8testing',
@ -275,7 +279,8 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
'test_args': ['--extra-flags', '--noenable-avx']
'test_args': ['--extra-flags', '--noenable-avx'],
'shards': 2,
},
{
'name': 'v8testing',
@ -325,6 +330,7 @@
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 10},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
{'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4},
],
},
##############################################################################
@ -348,10 +354,28 @@
{'name': 'benchmarks'},
{'name': 'mozilla'},
{'name': 'optimize_for_size'},
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 3},
],
},
'v8_linux64_coverage_dbg': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default'},
{'name': 'v8testing', 'variant': 'future'},
],
},
'v8_linux64_coverage_rel': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default'},
{'name': 'v8testing', 'variant': 'future'},
],
},
'v8_linux64_cppgc_non_default_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
@ -361,6 +385,14 @@
{'name': 'v8testing', 'shards': 3},
],
},
'v8_linux64_css_dbg': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 5},
],
},
'v8_linux64_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
@ -368,12 +400,12 @@
},
'tests': [
{'name': 'benchmarks', 'shards': 2},
{'name': 'benchmarks', 'variant': 'extra'},
{'name': 'benchmarks', 'variant': 'extra', 'shards': 2},
{'name': 'mjsunit_sp_frame_access', 'shards': 2},
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'test262', 'variant': 'extra', 'shards': 9},
{'name': 'test262', 'variant': 'default', 'shards': 3},
{'name': 'test262', 'variant': 'extra', 'shards': 12},
{'name': 'v8testing', 'shards': 5},
{'name': 'v8testing', 'variant': 'extra', 'shards': 5},
{'name': 'v8testing', 'variant': 'minor_mc'},
@ -383,6 +415,7 @@
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
{'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'v8_linux64_dict_tracking_dbg': {
@ -430,14 +463,10 @@
{'name': 'webkit', 'variant': 'stress_sampling'},
# Stress snapshot.
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Maglev.
{'name': 'mjsunit', 'variant': 'maglev'},
# Stress maglev.
{'name': 'mjsunit', 'variant': 'stress_maglev'},
# Stress maglev-future.
{'name': 'mjsunit', 'variant': 'maglev_future'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
'v8_linux64_gc_stress_custom_snapshot_dbg': {
@ -481,22 +510,6 @@
{'name': 'v8testing'},
],
},
'v8_linux64_gcov_coverage': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing'},
],
},
'v8_linux64_heap_sandbox_dbg': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 4},
],
},
'v8_linux64_minor_mc_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
@ -512,11 +525,11 @@
},
'v8_linux64_msan_rel': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
'os': 'Ubuntu-20.04',
},
'tests': [
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 5},
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 4},
],
},
'v8_linux64_nodcheck_rel': {
@ -533,9 +546,9 @@
{'name': 'mozilla', 'variant': 'assert_types'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'perf_integration'},
{'name': 'test262', 'shards': 2},
{'name': 'test262', 'variant': 'assert_types', 'shards': 2},
{'name': 'test262', 'variant': 'extra', 'shards': 2},
{'name': 'test262', 'shards': 4},
{'name': 'test262', 'variant': 'assert_types'},
{'name': 'test262', 'variant': 'extra', 'shards': 4},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'assert_types'},
{'name': 'v8testing', 'variant': 'extra'},
@ -543,6 +556,7 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
{'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'v8_linux64_perfetto_dbg': {
@ -589,7 +603,7 @@
{'name': 'mjsunit_sp_frame_access'},
{'name': 'optimize_for_size'},
{'name': 'test262', 'shards': 4},
{'name': 'test262', 'variant': 'extra', 'shards': 3},
{'name': 'test262', 'variant': 'extra', 'shards': 4},
{'name': 'v8initializers'},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
@ -599,6 +613,21 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
{'name': 'mjsunit', 'variant': 'stress_maglev'},
# GCMole.
{'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4},
{
'name': 'gcmole_v2',
'variant': 'x64',
'suffix': 'test single host',
'test_args': ['--test-run'],
},
{
'name': 'gcmole_v3',
'variant': 'x64',
'suffix': 'test multi host',
'test_args': ['--test-run'],
},
],
},
'v8_linux64_predictable_rel': {
@ -630,11 +659,23 @@
{'name': 'mozilla', 'shards': 2},
{'name': 'test262', 'variant': 'default', 'shards': 5},
{'name': 'v8testing', 'shards': 6},
{'name': 'v8testing', 'variant': 'extra', 'shards': 5},
{'name': 'v8testing', 'variant': 'extra', 'shards': 6},
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 2},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2},
],
},
'v8_linux64_tsan_dbg': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks', 'shards': 2},
{'name': 'mozilla', 'shards': 4},
{'name': 'test262', 'variant': 'default', 'shards': 5},
{'name': 'v8testing', 'shards': 12},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
],
},
'v8_linux64_tsan_no_cm_rel': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@ -699,14 +740,6 @@
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 12},
],
},
'v8_linux_arm64_sim_heap_sandbox_dbg': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 14},
],
},
'v8_linux_arm64_rel': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@ -717,6 +750,7 @@
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 14},
{'name': 'v8testing', 'variant': 'extra', 'shards': 14},
{'name': 'gcmole_v3', 'variant': 'arm64', 'shards': 4},
],
},
'v8_linux_arm64_cfi_rel': {
@ -787,7 +821,7 @@
'v8_win_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Windows-7-SP1',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -797,7 +831,7 @@
'v8_win_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Windows-7-SP1',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -809,7 +843,7 @@
# Win64
'v8_win64_asan_rel': {
'swarming_dimensions' : {
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'v8testing', 'shards': 5},
@ -818,7 +852,7 @@
'v8_win64_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -830,7 +864,7 @@
'v8_win64_msvc_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -841,7 +875,7 @@
'v8_win64_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -855,7 +889,7 @@
'v8_mac64_asan_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@ -864,7 +898,7 @@
'v8_mac64_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'tests': [
{'name': 'mozilla'},
@ -876,16 +910,25 @@
'v8_mac64_gc_stress_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'tests': [
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 6},
],
},
'v8_mac64_noopt_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-12',
},
'tests': [
{'name': 'v8testing', 'shards': 6},
],
},
'v8_mac64_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'tests': [
{'name': 'mozilla'},
@ -902,6 +945,9 @@
},
'tests': [
{'name': 'v8testing'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
{'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'v8_mac_arm64_dbg': {
@ -912,6 +958,9 @@
},
'tests': [
{'name': 'v8testing'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
{'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'v8_mac_arm64_full_dbg': {
@ -922,6 +971,9 @@
},
'tests': [
{'name': 'v8testing'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
{'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'v8_mac_arm64_no_pointer_compression_dbg': {
@ -937,7 +989,7 @@
'v8_mac_arm64_sim_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@ -947,7 +999,7 @@
'v8_mac_arm64_sim_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@ -957,7 +1009,7 @@
'v8_mac_arm64_sim_nodcheck_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@ -1042,7 +1094,8 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
'test_args': ['--extra-flags', '--noenable-avx']
'test_args': ['--extra-flags', '--noenable-avx'],
'shards': 2
},
{
'name': 'v8testing',
@ -1050,7 +1103,7 @@
'test_args': ['--extra-flags', '--noenable-avx'],
'shards': 2
},
{'name': 'gcmole'},
{'name': 'gcmole_v3', 'variant': 'ia32', 'shards': 4},
],
},
'V8 Linux - arm64 - sim - CFI': {
@ -1064,10 +1117,10 @@
},
'V8 Linux - arm64 - sim - MSAN': {
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
'os': 'Ubuntu-20.04',
},
'tests': [
{'name': 'test262', 'variant': 'default', 'shards': 3},
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 4},
],
},
@ -1086,9 +1139,9 @@
{'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
{'name': 'test262', 'shards': 6},
{'name': 'test262', 'shards': 12},
{'name': 'test262', 'variant': 'code_serializer', 'shards': 2},
{'name': 'test262', 'variant': 'extra', 'shards': 5},
{'name': 'test262', 'variant': 'extra', 'shards': 10},
{'name': 'v8testing', 'shards': 3},
{
'name': 'v8testing',
@ -1096,7 +1149,7 @@
'test_args': ['--isolates'],
'shards': 4
},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 4},
# Nosse3.
{
'name': 'mozilla',
@ -1107,7 +1160,8 @@
'name': 'test262',
'suffix': 'nosse3',
'variant': 'default',
'test_args': ['--extra-flags', '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx']
'test_args': ['--extra-flags', '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx'],
'shards': 2
},
{
'name': 'v8testing',
@ -1125,7 +1179,8 @@
'name': 'test262',
'suffix': 'nossse3',
'variant': 'default',
'test_args': ['--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx']
'test_args': ['--extra-flags', '--noenable-ssse3 --noenable-sse4-1 --noenable-avx'],
'shards': 2
},
{
'name': 'v8testing',
@ -1143,7 +1198,8 @@
'name': 'test262',
'suffix': 'nosse4',
'variant': 'default',
'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx']
'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'],
'shards': 2
},
{
'name': 'v8testing',
@ -1161,7 +1217,8 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
'test_args': ['--extra-flags', '--noenable-avx']
'test_args': ['--extra-flags', '--noenable-avx'],
'shards': 2
},
{
'name': 'v8testing',
@ -1203,7 +1260,7 @@
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'variant': 'default'},
],
},
@ -1224,7 +1281,7 @@
},
'tests': [
{'name': 'mozilla'},
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing'},
],
},
@ -1236,6 +1293,14 @@
{'name': 'v8testing'},
],
},
'V8 Linux PGO instrumentation - builder' : {
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'pgo_instrumentation'}
],
},
'V8 Linux64': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
@ -1253,7 +1318,7 @@
{'name': 'optimize_for_size'},
{'name': 'perf_integration'},
{'name': 'test262', 'shards': 2},
{'name': 'test262', 'variant': 'assert_types'},
{'name': 'test262', 'variant': 'assert_types', 'shards': 2},
{'name': 'test262', 'variant': 'extra', 'shards': 2},
{'name': 'v8initializers'},
{'name': 'v8testing'},
@ -1264,6 +1329,7 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
{'name': 'mjsunit', 'variant': 'stress_maglev'},
# Noavx.
{
'name': 'mozilla',
@ -1274,13 +1340,28 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
'test_args': ['--extra-flags', '--noenable-avx']
'test_args': ['--extra-flags', '--noenable-avx'],
'shards': 2
},
{
'name': 'v8testing',
'suffix': 'noavx',
'test_args': ['--extra-flags', '--noenable-avx']
},
# GCMole.
{'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4},
{
'name': 'gcmole_v2',
'variant': 'x64',
'suffix': 'test single host',
'test_args': ['--test-run'],
},
{
'name': 'gcmole_v3',
'variant': 'x64',
'suffix': 'test multi host',
'test_args': ['--test-run'],
},
],
},
'V8 Linux64 - cfi': {
@ -1291,10 +1372,28 @@
{'name': 'benchmarks'},
{'name': 'mozilla'},
{'name': 'optimize_for_size'},
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 2},
],
},
'V8 Linux64 - coverage': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default'},
{'name': 'v8testing', 'variant': 'future'},
],
},
'V8 Linux64 - coverage - debug': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default'},
{'name': 'v8testing', 'variant': 'future'},
],
},
'V8 Linux64 - custom snapshot - debug': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@ -1318,15 +1417,16 @@
{'name': 'test262', 'shards': 7},
{'name': 'test262', 'variant': 'extra', 'shards': 5},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 4},
{'name': 'v8testing', 'variant': 'minor_mc'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining', 'shards': 2},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
{'name': 'mjsunit', 'variant': 'stress_maglev'},
# Noavx.
{
'name': 'mozilla',
@ -1337,7 +1437,8 @@
'name': 'test262',
'suffix': 'noavx',
'variant': 'default',
'test_args': ['--extra-flags', '--noenable-avx']
'test_args': ['--extra-flags', '--noenable-avx'],
'shards': 2
},
{
'name': 'v8testing',
@ -1380,14 +1481,10 @@
{'name': 'webkit', 'variant': 'stress_sampling'},
# Stress snapshot.
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Maglev.
{'name': 'mjsunit', 'variant': 'maglev'},
# Stress maglev.
{'name': 'mjsunit', 'variant': 'stress_maglev'},
# Stress maglev-future.
{'name': 'mjsunit', 'variant': 'maglev_future'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
'V8 Linux64 - cppgc-non-default - debug': {
@ -1446,14 +1543,10 @@
{'name': 'webkit', 'variant': 'stress_sampling'},
# Stress snapshot.
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Maglev.
{'name': 'mjsunit', 'variant': 'maglev'},
# Stress maglev.
{'name': 'mjsunit', 'variant': 'stress_maglev'},
# Stress maglev-future.
{'name': 'mjsunit', 'variant': 'maglev_future'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
'V8 Linux64 gcc': {
@ -1489,22 +1582,6 @@
},
],
},
'V8 Linux64 - gcov coverage': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing'},
],
},
'V8 Linux64 - heap sandbox - debug': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 2},
],
},
'V8 Linux64 - internal snapshot': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@ -1545,7 +1622,7 @@
},
'tests': [
{'name': 'mozilla'},
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing'},
],
},
@ -1568,6 +1645,14 @@
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
],
},
'V8 Linux64 css - debug': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 5},
],
},
'V8 Linux64 GC Stress - custom snapshot': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@ -1580,6 +1665,14 @@
},
],
},
'V8 Linux64 PGO instrumentation - builder' : {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'pgo_instrumentation'}
],
},
'V8 Linux64 TSAN': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@ -1594,6 +1687,18 @@
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2},
],
},
'V8 Linux64 TSAN - debug': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks', 'shards': 2},
{'name': 'mozilla', 'shards': 4},
{'name': 'test262', 'variant': 'default', 'shards': 5},
{'name': 'v8testing', 'shards': 12},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
],
},
'V8 Linux64 TSAN - stress-incremental-marking': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@ -1667,7 +1772,7 @@
'V8 Mac64': {
'swarming_dimensions': {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'tests': [
{'name': 'mozilla'},
@ -1679,7 +1784,7 @@
'V8 Mac64 - debug': {
'swarming_dimensions': {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'tests': [
{'name': 'mozilla'},
@ -1691,7 +1796,7 @@
'V8 Mac64 ASAN': {
'swarming_dimensions': {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'tests': [
{'name': 'v8testing', 'shards': 10},
@ -1700,7 +1805,7 @@
'V8 Mac64 GC Stress': {
'swarming_dimensions': {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'tests': [
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 6},
@ -1720,6 +1825,9 @@
'tests': [
{'name': 'v8testing'},
{'name': 'v8testing', 'variant': 'extra'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
{'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'V8 Mac - arm64 - debug': {
@ -1736,6 +1844,9 @@
'tests': [
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
{'name': 'mjsunit', 'variant': 'stress_maglev'},
],
},
'V8 Mac - arm64 - no pointer compression debug': {
@ -1751,7 +1862,7 @@
'V8 Mac - arm64 - sim - debug': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'swarming_task_attrs': {
'expiration': 14400,
@ -1766,7 +1877,7 @@
'V8 Mac - arm64 - sim - release': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
'os': 'Mac-12',
},
'swarming_task_attrs': {
'expiration': 14400,
@ -1781,39 +1892,39 @@
'V8 Win32': {
'swarming_dimensions': {
'cpu': 'x86-64',
'os': 'Windows-7-SP1',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing'},
],
},
'V8 Win32 - debug': {
'swarming_dimensions': {
'cpu': 'x86-64',
'os': 'Windows-7-SP1',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 5},
],
},
'V8 Win64': {
'swarming_dimensions': {
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
],
},
'V8 Win64 - debug': {
'swarming_dimensions': {
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -1824,17 +1935,17 @@
},
'V8 Win64 - msvc': {
'swarming_dimensions': {
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 2},
],
},
'V8 Win64 ASAN': {
'swarming_dimensions': {
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'v8testing', 'shards': 5},
@ -1951,7 +2062,8 @@
'name': 'test262',
'suffix': 'armv8-a',
'variant': 'default',
'test_args': ['--extra-flags', '--enable-armv8']
'test_args': ['--extra-flags', '--enable-armv8'],
'shards': 2
},
{
'name': 'v8testing',
@ -1970,6 +2082,7 @@
'suffix': 'novfp3',
'variant': 'default',
'test_args': ['--novfp3'],
'shards': 2
},
{
'name': 'v8testing',
@ -1977,6 +2090,8 @@
'test_args': ['--novfp3'],
'shards': 6
},
# GCMole.
{'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4},
],
},
'V8 Linux - arm - sim - debug': {
@ -2055,9 +2170,10 @@
'tests': [
{'name': 'mjsunit_sp_frame_access'},
{'name': 'mozilla'},
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
{'name': 'gcmole_v3', 'variant': 'arm64', 'shards': 4},
],
},
'V8 Linux - arm64 - sim - debug': {
@ -2093,19 +2209,6 @@
},
],
},
'V8 Linux64 - arm64 - sim - heap sandbox - debug': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 7200,
'priority': 35,
},
'tests': [
{'name': 'v8testing', 'shards': 14},
],
},
'V8 Linux - loong64 - sim': {
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
@ -2129,7 +2232,7 @@
'priority': 35,
},
'tests': [
{'name': 'test262', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 4},
],
},

9
deps/v8/src/DEPS vendored
View file

@ -10,19 +10,20 @@ include_rules = [
"-src/bigint",
"+src/bigint/bigint.h",
"-src/compiler",
"+src/compiler/pipeline.h",
"+src/compiler/code-assembler.h",
"+src/compiler/turbofan.h",
"+src/compiler/wasm-compiler-definitions.h",
"+src/compiler/wasm-compiler.h",
"-src/heap",
"+src/heap/basic-memory-chunk.h",
"+src/heap/code-range.h",
"+src/heap/combined-heap.h",
"+src/heap/embedder-tracing.h",
"+src/heap/factory.h",
"+src/heap/factory-inl.h",
# TODO(v8:10496): Don't expose so much (through transitive includes) outside
# of heap/.
"+src/heap/gc-tracer.h",
"+src/heap/gc-tracer-inl.h",
"+src/heap/heap.h",
"+src/heap/heap-verifier.h",
"+src/heap/heap-inl.h",
@ -76,6 +77,7 @@ include_rules = [
"+starboard",
# Using cppgc inside v8 is not (yet) allowed.
"-include/cppgc",
"+include/cppgc/common.h",
"+include/cppgc/platform.h",
"+include/cppgc/source-location.h",
]
@ -84,7 +86,8 @@ specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
"+include/libplatform/v8-tracing.h",
"+perfetto/tracing.h"
"+perfetto/tracing/track_event.h",
"+perfetto/tracing/track_event_legacy.h"
],
"d8-platforms\.cc": [
"+include/libplatform/libplatform.h",

View file

@ -11,51 +11,51 @@
#include "src/execution/vm-state-inl.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/instance-type.h"
#include "src/objects/slots-inl.h"
#include "v8-isolate.h"
namespace v8 {
namespace internal {
void Object::VerifyApiCallResultType() {
#if DEBUG
if (IsSmi()) return;
bool Object::IsApiCallResultType() const {
if (IsSmi()) return true;
DCHECK(IsHeapObject());
if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
IsBigInt() || IsUndefined() || IsTrue() || IsFalse() || IsNull())) {
FATAL("API call returned invalid object");
}
#endif // DEBUG
return (IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
IsBigInt() || IsUndefined() || IsTrue() || IsFalse() || IsNull());
}
#endif // DEBUG
CustomArgumentsBase::CustomArgumentsBase(Isolate* isolate)
: Relocatable(isolate) {}
template <typename T>
CustomArguments<T>::~CustomArguments() {
slot_at(kReturnValueOffset).store(Object(kHandleZapValue));
slot_at(kReturnValueIndex).store(Object(kHandleZapValue));
}
template <typename T>
template <typename V>
Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) const {
// Check the ReturnValue.
FullObjectSlot slot = slot_at(kReturnValueOffset);
FullObjectSlot slot = slot_at(kReturnValueIndex);
// Nothing was set, return empty handle as per previous behaviour.
if ((*slot).IsTheHole(isolate)) return Handle<V>();
Handle<V> result = Handle<V>::cast(Handle<Object>(slot.location()));
result->VerifyApiCallResultType();
return result;
Object raw_object = *slot;
if (raw_object.IsTheHole(isolate)) return Handle<V>();
DCHECK(raw_object.IsApiCallResultType());
return Handle<V>::cast(Handle<Object>(slot.location()));
}
inline JSObject PropertyCallbackArguments::holder() {
inline JSObject PropertyCallbackArguments::holder() const {
return JSObject::cast(*slot_at(T::kHolderIndex));
}
inline Object PropertyCallbackArguments::receiver() {
inline Object PropertyCallbackArguments::receiver() const {
return *slot_at(T::kThisIndex);
}
inline JSReceiver FunctionCallbackArguments::holder() {
inline JSReceiver FunctionCallbackArguments::holder() const {
return JSReceiver::cast(*slot_at(T::kHolderIndex));
}

View file

@ -6,6 +6,7 @@
#define V8_API_API_ARGUMENTS_H_
#include "include/v8-template.h"
#include "src/builtins/builtins-utils.h"
#include "src/execution/isolate.h"
#include "src/objects/slots.h"
#include "src/objects/visitors.h"
@ -24,7 +25,8 @@ class CustomArgumentsBase : public Relocatable {
template <typename T>
class CustomArguments : public CustomArgumentsBase {
public:
static const int kReturnValueOffset = T::kReturnValueIndex;
static constexpr int kReturnValueIndex = T::kReturnValueIndex;
static_assert(T::kSize == sizeof(T));
~CustomArguments() override;
@ -38,19 +40,20 @@ class CustomArguments : public CustomArgumentsBase {
: CustomArgumentsBase(isolate) {}
template <typename V>
Handle<V> GetReturnValue(Isolate* isolate);
Handle<V> GetReturnValue(Isolate* isolate) const;
inline Isolate* isolate() {
inline Isolate* isolate() const {
return reinterpret_cast<Isolate*>((*slot_at(T::kIsolateIndex)).ptr());
}
inline FullObjectSlot slot_at(int index) {
inline FullObjectSlot slot_at(int index) const {
// This allows index == T::kArgsLength so "one past the end" slots
// can be retrieved for iterating purposes.
DCHECK_LE(static_cast<unsigned>(index),
static_cast<unsigned>(T::kArgsLength));
return FullObjectSlot(values_ + index);
}
Address values_[T::kArgsLength];
};
@ -69,14 +72,14 @@ class PropertyCallbackArguments final
public:
using T = PropertyCallbackInfo<Value>;
using Super = CustomArguments<T>;
static const int kArgsLength = T::kArgsLength;
static const int kThisIndex = T::kThisIndex;
static const int kHolderIndex = T::kHolderIndex;
static const int kDataIndex = T::kDataIndex;
static const int kReturnValueDefaultValueIndex =
static constexpr int kArgsLength = T::kArgsLength;
static constexpr int kThisIndex = T::kThisIndex;
static constexpr int kHolderIndex = T::kHolderIndex;
static constexpr int kDataIndex = T::kDataIndex;
static constexpr int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
static const int kIsolateIndex = T::kIsolateIndex;
static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
static constexpr int kIsolateIndex = T::kIsolateIndex;
static constexpr int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
PropertyCallbackArguments(Isolate* isolate, Object data, Object self,
JSObject holder, Maybe<ShouldThrow> should_throw);
@ -161,13 +164,13 @@ class PropertyCallbackArguments final
GenericNamedPropertyGetterCallback f, Handle<Name> name,
Handle<Object> info, Handle<Object> receiver = Handle<Object>());
inline JSObject holder();
inline Object receiver();
inline JSObject holder() const;
inline Object receiver() const;
#ifdef DEBUG
// This stores current value of Isolate::javascript_execution_counter().
// It's used for detecting whether JavaScript code was executed between
// PropertyCallbackArguments's constructior and destructor.
// PropertyCallbackArguments's constructor and destructor.
uint32_t javascript_execution_counter_;
#endif // DEBUG
};
@ -177,13 +180,21 @@ class FunctionCallbackArguments
public:
using T = FunctionCallbackInfo<Value>;
using Super = CustomArguments<T>;
static const int kArgsLength = T::kArgsLength;
static const int kHolderIndex = T::kHolderIndex;
static const int kDataIndex = T::kDataIndex;
static const int kReturnValueDefaultValueIndex =
static constexpr int kArgsLength = T::kArgsLength;
static constexpr int kArgsLengthWithReceiver = T::kArgsLengthWithReceiver;
static constexpr int kHolderIndex = T::kHolderIndex;
static constexpr int kDataIndex = T::kDataIndex;
static constexpr int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
static const int kIsolateIndex = T::kIsolateIndex;
static const int kNewTargetIndex = T::kNewTargetIndex;
static constexpr int kIsolateIndex = T::kIsolateIndex;
static constexpr int kNewTargetIndex = T::kNewTargetIndex;
static_assert(T::kThisValuesIndex == BuiltinArguments::kReceiverArgsOffset);
// Make sure all FunctionCallbackInfo constants are in sync.
static_assert(T::kImplicitArgsOffset == offsetof(T, implicit_args_));
static_assert(T::kValuesOffset == offsetof(T, values_));
static_assert(T::kLengthOffset == offsetof(T, length_));
FunctionCallbackArguments(Isolate* isolate, Object data, Object holder,
HeapObject new_target, Address* argv, int argc);
@ -199,12 +210,17 @@ class FunctionCallbackArguments
inline Handle<Object> Call(CallHandlerInfo handler);
private:
inline JSReceiver holder();
inline JSReceiver holder() const;
internal::Address* argv_;
int argc_;
int const argc_;
};
static_assert(BuiltinArguments::kNumExtraArgs ==
BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver);
static_assert(BuiltinArguments::kNumExtraArgsWithReceiver ==
BuiltinExitFrameConstants::kNumExtraArgsWithReceiver);
} // namespace internal
} // namespace v8

View file

@ -7,8 +7,10 @@
#include "include/v8-fast-api-calls.h"
#include "src/api/api.h"
#include "src/common/assert-scope.h"
#include "src/execution/interrupts-scope.h"
#include "src/execution/microtask-queue.h"
#include "src/flags/flags.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/foreign-inl.h"
@ -51,7 +53,10 @@ inline v8::internal::Handle<v8::internal::Object> FromCData(
template <class From, class To>
inline Local<To> Utils::Convert(v8::internal::Handle<From> obj) {
DCHECK(obj.is_null() || (obj->IsSmi() || !obj->IsTheHole()));
return Local<To>(reinterpret_cast<To*>(obj.location()));
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
if (obj.is_null()) return Local<To>();
#endif
return Local<To>(internal::ValueHelper::SlotAsValue<To>(obj.location()));
}
// Implementations of ToLocal
@ -61,6 +66,8 @@ inline Local<To> Utils::Convert(v8::internal::Handle<From> obj) {
return Convert<v8::internal::From, v8::To>(obj); \
}
TO_LOCAL_LIST(MAKE_TO_LOCAL)
#define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype) \
Local<v8::Type##Array> Utils::ToLocal##Type##Array( \
v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
@ -68,65 +75,51 @@ inline Local<To> Utils::Convert(v8::internal::Handle<From> obj) {
return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \
}
MAKE_TO_LOCAL(ToLocal, AccessorPair, debug::AccessorPair)
MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value)
MAKE_TO_LOCAL(ToLocal, Module, Module)
MAKE_TO_LOCAL(ToLocal, Name, Name)
MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSReceiver, Object)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSMap, Map)
MAKE_TO_LOCAL(ToLocal, JSSet, Set)
MAKE_TO_LOCAL(ToLocal, JSProxy, Proxy)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer)
TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
MAKE_TO_LOCAL(StackTraceToLocal, FixedArray, StackTrace)
MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
MAKE_TO_LOCAL(ToLocal, BigInt, BigInt)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function)
MAKE_TO_LOCAL(ToLocalPrimitive, Object, Primitive)
MAKE_TO_LOCAL(FixedArrayToLocal, FixedArray, FixedArray)
MAKE_TO_LOCAL(PrimitiveArrayToLocal, FixedArray, PrimitiveArray)
MAKE_TO_LOCAL(ToLocal, ScriptOrModule, ScriptOrModule)
#undef MAKE_TO_LOCAL_TYPED_ARRAY
#undef MAKE_TO_LOCAL
#undef TO_LOCAL_LIST
// Implementations of OpenHandle
#define MAKE_OPEN_HANDLE(From, To) \
v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
const v8::From* that, bool allow_empty_handle) { \
DCHECK(allow_empty_handle || that != nullptr); \
DCHECK(that == nullptr || \
v8::internal::Object( \
*reinterpret_cast<const v8::internal::Address*>(that)) \
.Is##To()); \
return v8::internal::Handle<v8::internal::To>( \
reinterpret_cast<v8::internal::Address*>( \
const_cast<v8::From*>(that))); \
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
#define MAKE_OPEN_HANDLE(From, To) \
v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
const v8::From* that, bool allow_empty_handle) { \
DCHECK(allow_empty_handle || \
that != v8::internal::ValueHelper::EmptyValue<v8::From>()); \
DCHECK( \
that == v8::internal::ValueHelper::EmptyValue<v8::From>() || \
v8::internal::Object(v8::internal::ValueHelper::ValueAsAddress(that)) \
.Is##To()); \
if (that == v8::internal::ValueHelper::EmptyValue<v8::From>()) { \
return v8::internal::Handle<v8::internal::To>::null(); \
} \
return v8::internal::Handle<v8::internal::To>( \
v8::HandleScope::CreateHandleForCurrentIsolate( \
reinterpret_cast<v8::internal::Address>(that))); \
}
#else
#define MAKE_OPEN_HANDLE(From, To) \
v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
const v8::From* that, bool allow_empty_handle) { \
DCHECK(allow_empty_handle || \
that != v8::internal::ValueHelper::EmptyValue<v8::From>()); \
DCHECK( \
that == v8::internal::ValueHelper::EmptyValue<v8::From>() || \
v8::internal::Object(v8::internal::ValueHelper::ValueAsAddress(that)) \
.Is##To()); \
return v8::internal::Handle<v8::internal::To>( \
reinterpret_cast<v8::internal::Address*>( \
const_cast<v8::From*>(that))); \
}
#endif
OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
#undef MAKE_OPEN_HANDLE
@ -150,12 +143,13 @@ class V8_NODISCARD CallDepthScope {
isolate_->thread_local_top()->IncrementCallDepth(this);
isolate_->set_next_v8_call_is_safe_for_termination(false);
if (!context.IsEmpty()) {
i::Handle<i::Context> env = Utils::OpenHandle(*context);
i::DisallowGarbageCollection no_gc;
i::Context env = *Utils::OpenHandle(*context);
i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
if (isolate->context().is_null() ||
isolate->context().native_context() != env->native_context()) {
isolate->context().native_context() != env.native_context()) {
impl->SaveContext(isolate->context());
isolate->set_context(*env);
isolate->set_context(env);
did_enter_context_ = true;
}
}
@ -207,7 +201,8 @@ class V8_NODISCARD CallDepthScope {
bool did_perform_microtask_checkpoint =
isolate_->thread_local_top()->CallDepthIsZero() && do_callback &&
microtask_queue &&
microtask_queue->microtasks_policy() == MicrotasksPolicy::kAuto;
microtask_queue->microtasks_policy() == MicrotasksPolicy::kAuto &&
!isolate_->is_execution_terminating();
return !did_perform_microtask_checkpoint ||
isolate_->heap()->weak_refs_keep_during_job().IsUndefined(isolate_);
}

View file

@ -41,7 +41,8 @@
#define ENTER_V8_BASIC(i_isolate) \
/* Embedders should never enter V8 after terminating it */ \
DCHECK(!i_isolate->is_execution_terminating()); \
DCHECK_IMPLIES(i::v8_flags.strict_termination_checks, \
!i_isolate->is_execution_terminating()); \
i::VMState<v8::OTHER> __state__((i_isolate))
#define ENTER_V8_HELPER_INTERNAL(i_isolate, context, class_name, \
@ -91,29 +92,19 @@
bailout_value, HandleScopeClass, false); \
i::DisallowJavascriptExecutionDebugOnly __no_script__((i_isolate))
#define DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(i_isolate) \
// Lightweight version for APIs that don't require an active context.
#define DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate) \
i::DisallowJavascriptExecutionDebugOnly __no_script__((i_isolate)); \
i::DisallowExceptions __no_exceptions__((i_isolate))
// Lightweight version for APIs that don't require an active context.
#define DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate) \
/* Embedders should never enter V8 after terminating it */ \
DCHECK(!i_isolate->is_execution_terminating()); \
DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(i_isolate)
#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate) \
i::VMState<v8::OTHER> __state__((i_isolate)); \
DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate)
// Used instead of ENTER_V8_NO_SCRIPT_NO_EXCEPTION where the V8 Api is entered
// during termination sequences.
#define ENTER_V8_MAYBE_TEARDOWN(i_isolate) \
i::VMState<v8::OTHER> __state__((i_isolate)); \
DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(i_isolate)
#define ENTER_V8_FOR_NEW_CONTEXT(i_isolate) \
DCHECK(!(i_isolate)->is_execution_terminating()); \
i::VMState<v8::OTHER> __state__((i_isolate)); \
#define ENTER_V8_FOR_NEW_CONTEXT(i_isolate) \
DCHECK_IMPLIES(i::v8_flags.strict_termination_checks, \
!(i_isolate)->is_execution_terminating()); \
i::VMState<v8::OTHER> __state__((i_isolate)); \
i::DisallowExceptions __no_exceptions__((i_isolate))
#else // DEBUG
#define ENTER_V8_NO_SCRIPT(i_isolate, context, class_name, function_name, \
@ -122,14 +113,10 @@
bailout_value, HandleScopeClass, false)
#define DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate)
#define DCHECK_NO_SCRIPT_NO_EXCEPTION_MAYBE_TEARDOWN(i_isolate)
#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate) \
i::VMState<v8::OTHER> __state__((i_isolate));
#define ENTER_V8_MAYBE_TEARDOWN(i_isolate) \
i::VMState<v8::OTHER> __state__((i_isolate));
#define ENTER_V8_FOR_NEW_CONTEXT(i_isolate) \
i::VMState<v8::OTHER> __state__((i_isolate));
#endif // DEBUG

View file

@ -83,7 +83,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
InstantiateFunction(isolate,
Handle<FunctionTemplateInfo>::cast(getter)),
Object);
Handle<CodeT> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Handle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Handle<JSFunction>::cast(getter)->set_code(*trampoline);
}
if (setter->IsFunctionTemplateInfo() &&
@ -93,7 +93,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
InstantiateFunction(isolate,
Handle<FunctionTemplateInfo>::cast(setter)),
Object);
Handle<CodeT> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Handle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Handle<JSFunction>::cast(setter)->set_code(*trampoline);
}
RETURN_ON_EXCEPTION(

1093
deps/v8/src/api/api.cc vendored

File diff suppressed because it is too large Load diff

150
deps/v8/src/api/api.h vendored
View file

@ -15,6 +15,7 @@
#include "src/execution/isolate.h"
#include "src/objects/bigint.h"
#include "src/objects/contexts.h"
#include "src/objects/js-array-buffer.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
@ -92,6 +93,46 @@ class RegisteredExtension {
static RegisteredExtension* first_extension_;
};
#define TO_LOCAL_LIST(V) \
V(ToLocal, AccessorPair, debug::AccessorPair) \
V(ToLocal, Context, Context) \
V(ToLocal, Object, Value) \
V(ToLocal, Module, Module) \
V(ToLocal, Name, Name) \
V(ToLocal, String, String) \
V(ToLocal, Symbol, Symbol) \
V(ToLocal, JSRegExp, RegExp) \
V(ToLocal, JSReceiver, Object) \
V(ToLocal, JSObject, Object) \
V(ToLocal, JSFunction, Function) \
V(ToLocal, JSArray, Array) \
V(ToLocal, JSMap, Map) \
V(ToLocal, JSSet, Set) \
V(ToLocal, JSProxy, Proxy) \
V(ToLocal, JSArrayBuffer, ArrayBuffer) \
V(ToLocal, JSArrayBufferView, ArrayBufferView) \
V(ToLocal, JSDataView, DataView) \
V(ToLocal, JSRabGsabDataView, DataView) \
V(ToLocal, JSTypedArray, TypedArray) \
V(ToLocalShared, JSArrayBuffer, SharedArrayBuffer) \
V(ToLocal, FunctionTemplateInfo, FunctionTemplate) \
V(ToLocal, ObjectTemplateInfo, ObjectTemplate) \
V(SignatureToLocal, FunctionTemplateInfo, Signature) \
V(MessageToLocal, Object, Message) \
V(PromiseToLocal, JSObject, Promise) \
V(StackTraceToLocal, FixedArray, StackTrace) \
V(StackFrameToLocal, StackFrameInfo, StackFrame) \
V(NumberToLocal, Object, Number) \
V(IntegerToLocal, Object, Integer) \
V(Uint32ToLocal, Object, Uint32) \
V(ToLocal, BigInt, BigInt) \
V(ExternalToLocal, JSObject, External) \
V(CallableToLocal, JSReceiver, Function) \
V(ToLocalPrimitive, Object, Primitive) \
V(FixedArrayToLocal, FixedArray, FixedArray) \
V(PrimitiveArrayToLocal, FixedArray, PrimitiveArray) \
V(ToLocal, ScriptOrModule, ScriptOrModule)
#define OPEN_HANDLE_LIST(V) \
V(Template, TemplateInfo) \
V(FunctionTemplate, FunctionTemplateInfo) \
@ -115,7 +156,7 @@ class RegisteredExtension {
V(Int32Array, JSTypedArray) \
V(Float32Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \
V(DataView, JSDataView) \
V(DataView, JSDataViewOrRabGsabDataView) \
V(SharedArrayBuffer, JSArrayBuffer) \
V(Name, Name) \
V(String, String) \
@ -155,104 +196,17 @@ class Utils {
static void ReportOOMFailure(v8::internal::Isolate* isolate,
const char* location, const OOMDetails& details);
static inline Local<debug::AccessorPair> ToLocal(
v8::internal::Handle<v8::internal::AccessorPair> obj);
static inline Local<Context> ToLocal(
v8::internal::Handle<v8::internal::Context> obj);
static inline Local<Value> ToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Module> ToLocal(
v8::internal::Handle<v8::internal::Module> obj);
static inline Local<Name> ToLocal(
v8::internal::Handle<v8::internal::Name> obj);
static inline Local<String> ToLocal(
v8::internal::Handle<v8::internal::String> obj);
static inline Local<Symbol> ToLocal(
v8::internal::Handle<v8::internal::Symbol> obj);
static inline Local<RegExp> ToLocal(
v8::internal::Handle<v8::internal::JSRegExp> obj);
static inline Local<Object> ToLocal(
v8::internal::Handle<v8::internal::JSReceiver> obj);
static inline Local<Object> ToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Function> ToLocal(
v8::internal::Handle<v8::internal::JSFunction> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<Map> ToLocal(
v8::internal::Handle<v8::internal::JSMap> obj);
static inline Local<Set> ToLocal(
v8::internal::Handle<v8::internal::JSSet> obj);
static inline Local<Proxy> ToLocal(
v8::internal::Handle<v8::internal::JSProxy> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<ArrayBufferView> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBufferView> obj);
static inline Local<DataView> ToLocal(
v8::internal::Handle<v8::internal::JSDataView> obj);
static inline Local<TypedArray> ToLocal(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint8Array> ToLocalUint8Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint8ClampedArray> ToLocalUint8ClampedArray(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Int8Array> ToLocalInt8Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint16Array> ToLocalUint16Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Int16Array> ToLocalInt16Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint32Array> ToLocalUint32Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Int32Array> ToLocalInt32Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Float32Array> ToLocalFloat32Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Float64Array> ToLocalFloat64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<BigInt64Array> ToLocalBigInt64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<BigUint64Array> ToLocalBigUint64Array(
#define DECLARE_TO_LOCAL(Name, From, To) \
static inline Local<v8::To> Name( \
v8::internal::Handle<v8::internal::From> obj);
TO_LOCAL_LIST(DECLARE_TO_LOCAL)
#define DECLARE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype) \
static inline Local<v8::Type##Array> ToLocal##Type##Array( \
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<SharedArrayBuffer> ToLocalShared(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Promise> PromiseToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<StackTrace> StackTraceToLocal(
v8::internal::Handle<v8::internal::FixedArray> obj);
static inline Local<StackFrame> StackFrameToLocal(
v8::internal::Handle<v8::internal::StackFrameInfo> obj);
static inline Local<Number> NumberToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Integer> IntegerToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Uint32> Uint32ToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<BigInt> ToLocal(
v8::internal::Handle<v8::internal::BigInt> obj);
static inline Local<FunctionTemplate> ToLocal(
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<ObjectTemplate> ToLocal(
v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
static inline Local<Signature> SignatureToLocal(
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<External> ExternalToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Function> CallableToLocal(
v8::internal::Handle<v8::internal::JSReceiver> obj);
static inline Local<Primitive> ToLocalPrimitive(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<FixedArray> FixedArrayToLocal(
v8::internal::Handle<v8::internal::FixedArray> obj);
static inline Local<PrimitiveArray> PrimitiveArrayToLocal(
v8::internal::Handle<v8::internal::FixedArray> obj);
static inline Local<ScriptOrModule> ToLocal(
v8::internal::Handle<v8::internal::ScriptOrModule> obj);
TYPED_ARRAYS(DECLARE_TO_LOCAL_TYPED_ARRAY)
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> OpenHandle( \
@ -261,6 +215,8 @@ class Utils {
OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
#undef DECLARE_OPEN_HANDLE
#undef DECLARE_TO_LOCAL_TYPED_ARRAY
#undef DECLARE_TO_LOCAL
template <class From, class To>
static inline Local<To> Convert(v8::internal::Handle<From> obj);

View file

@ -77,7 +77,7 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
shared.builtin_id() != Builtin::kMath##FName) { \
return false; \
} \
DCHECK_EQ(shared.GetCode(), \
DCHECK_EQ(shared.GetCode(isolate), \
isolate->builtins()->code(Builtin::kMath##FName)); \
}
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)

View file

@ -2240,7 +2240,7 @@ AsmType* AsmJsParser::ValidateCall() {
function_type->AsFunctionType()->AddArgument(t);
}
FunctionSig* sig = ConvertSignature(return_type, param_types);
uint32_t signature_index = module_builder_->AddSignature(sig);
uint32_t signature_index = module_builder_->AddSignature(sig, true);
// Emit actual function invocation depending on the kind. At this point we
// also determined the complete function type and can perform checking against

View file

@ -133,7 +133,7 @@ class AsmJsParser {
public:
explicit CachedVectors(Zone* zone) : reusable_vectors_(zone) {}
Zone* zone() const { return reusable_vectors_.get_allocator().zone(); }
Zone* zone() const { return reusable_vectors_.zone(); }
inline void fill(ZoneVector<T>* vec) {
if (reusable_vectors_.empty()) return;

View file

@ -130,7 +130,7 @@ class V8_EXPORT_PRIVATE AsmFunctionType final : public AsmCallableType {
AsmFunctionType* AsFunctionType() final { return this; }
void AddArgument(AsmType* type) { args_.push_back(type); }
const ZoneVector<AsmType*> Arguments() const { return args_; }
const ZoneVector<AsmType*>& Arguments() const { return args_; }
AsmType* ReturnType() const { return return_type_; }
bool CanBeInvokedWith(AsmType* return_type,

View file

@ -181,6 +181,10 @@ int AstRawString::Compare(const AstRawString* lhs, const AstRawString* rhs) {
return lhs->byte_length() - rhs->byte_length();
}
#ifdef OBJECT_PRINT
void AstRawString::Print() const { printf("%.*s", byte_length(), raw_data()); }
#endif // OBJECT_PRINT
template <typename IsolateT>
Handle<String> AstConsString::Allocate(IsolateT* isolate) const {
DCHECK(string_.is_null());

View file

@ -90,6 +90,10 @@ class AstRawString final : public ZoneObject {
return string_;
}
#ifdef OBJECT_PRINT
void Print() const;
#endif // OBJECT_PRINT
private:
friend class AstRawStringInternalizationKey;
friend class AstStringConstants;

View file

@ -1613,7 +1613,9 @@ enum AssignType {
PRIVATE_METHOD, // obj.#key: #key is a private method
PRIVATE_GETTER_ONLY, // obj.#key: #key only has a getter defined
PRIVATE_SETTER_ONLY, // obj.#key: #key only has a setter defined
PRIVATE_GETTER_AND_SETTER // obj.#key: #key has both accessors defined
PRIVATE_GETTER_AND_SETTER, // obj.#key: #key has both accessors defined
PRIVATE_DEBUG_DYNAMIC, // obj.#key: #key is private that requries dynamic
// lookup in debug-evaluate.
};
class Property final : public Expression {
@ -1650,6 +1652,9 @@ class Property final : public Expression {
return PRIVATE_SETTER_ONLY;
case VariableMode::kPrivateGetterAndSetter:
return PRIVATE_GETTER_AND_SETTER;
case VariableMode::kDynamic:
// From debug-evaluate.
return PRIVATE_DEBUG_DYNAMIC;
default:
UNREACHABLE();
}

View file

@ -1367,6 +1367,10 @@ void AstPrinter::VisitProperty(Property* node) {
PrintIndentedVisit("KEY", node->key());
break;
}
case PRIVATE_DEBUG_DYNAMIC: {
PrintIndentedVisit("PRIVATE_DEBUG_DYNAMIC", node->key());
break;
}
case NON_PROPERTY:
UNREACHABLE();
}

View file

@ -632,7 +632,8 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// scope and we terminate the iteration there anyway.
do {
Variable* var = query_scope->LookupInScopeOrScopeInfo(name, query_scope);
if (var != nullptr && IsLexicalVariableMode(var->mode())) {
if (var != nullptr && IsLexicalVariableMode(var->mode()) &&
!var->is_sloppy_block_function()) {
should_hoist = false;
break;
}
@ -649,6 +650,19 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
auto declaration = factory->NewVariableDeclaration(pos);
// Based on the preceding checks, it doesn't matter what we pass as
// sloppy_mode_block_scope_function_redefinition.
//
// This synthesized var for Annex B functions-in-block (FiB) may be
// declared multiple times for the same var scope, such as in the case of
// shadowed functions-in-block like the following:
//
// {
// function f() {}
// { function f() {} }
// }
//
// Redeclarations for vars do not create new bindings, but the
// redeclarations' initializers are still run. That is, shadowed FiB will
// result in multiple assignments to the same synthesized var.
Variable* var = DeclareVariable(
declaration, name, pos, VariableMode::kVar, NORMAL_VARIABLE,
Variable::DefaultInitializationFlag(VariableMode::kVar), &was_added,
@ -1263,8 +1277,9 @@ Declaration* DeclarationScope::CheckConflictingVarDeclarations(
if (decl->IsVariableDeclaration() &&
decl->AsVariableDeclaration()->AsNested() != nullptr) {
Scope* current = decl->AsVariableDeclaration()->AsNested()->scope();
DCHECK(decl->var()->mode() == VariableMode::kVar ||
decl->var()->mode() == VariableMode::kDynamic);
if (decl->var()->mode() != VariableMode::kVar &&
decl->var()->mode() != VariableMode::kDynamic)
continue;
// Iterate through all scopes until the declaration scope.
do {
// There is a conflict if there exists a non-VAR binding.
@ -1796,6 +1811,8 @@ const char* Header(ScopeType scope_type, FunctionKind function_kind,
case CLASS_SCOPE:
return "class";
case WITH_SCOPE: return "with";
case SHADOW_REALM_SCOPE:
return "shadowrealm";
}
UNREACHABLE();
}
@ -2058,6 +2075,15 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
return var;
}
void Scope::ForceDynamicLookup(VariableProxy* proxy) {
// At the moment this is only used for looking up private names dynamically
// in debug-evaluate from top-level scope.
DCHECK(proxy->IsPrivateName());
DCHECK(is_script_scope() || is_module_scope() || is_eval_scope());
Variable* dynamic = NonLocal(proxy->raw_name(), VariableMode::kDynamic);
proxy->BindTo(dynamic);
}
// static
template <Scope::ScopeLookupMode mode>
Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope,
@ -3109,6 +3135,13 @@ void PrivateNameScopeIterator::AddUnresolvedPrivateName(VariableProxy* proxy) {
// be new.
DCHECK(!proxy->is_resolved());
DCHECK(proxy->IsPrivateName());
// Use dynamic lookup for top-level scopes in debug-evaluate.
if (Done()) {
start_scope_->ForceDynamicLookup(proxy);
return;
}
GetScope()->EnsureRareData()->unresolved_private_names.Add(proxy);
// Any closure scope that contain uses of private names that skips over a
// class scope due to heritage expressions need private name context chain

View file

@ -637,6 +637,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return nullptr;
}
void ForceDynamicLookup(VariableProxy* proxy);
protected:
explicit Scope(Zone* zone);

View file

@ -5,3 +5,9 @@ include_rules = [
"-src",
"+src/base",
]
specific_include_rules = {
"ieee754.h": [
"+third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h"
],
}

View file

@ -62,7 +62,7 @@ class BitField final {
}
// Returns a type U with the bit field value updated.
static constexpr U update(U previous, T value) {
V8_NODISCARD static constexpr U update(U previous, T value) {
return (previous & ~kMask) | encode(value);
}

View file

@ -310,9 +310,13 @@ inline bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed summation resulted in an overflow.
inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
#if V8_HAS_BUILTIN_ADD_OVERFLOW
return __builtin_add_overflow(lhs, rhs, val);
#else
uint64_t res = static_cast<uint64_t>(lhs) + static_cast<uint64_t>(rhs);
*val = base::bit_cast<int64_t>(res);
return ((res ^ lhs) & (res ^ rhs) & (1ULL << 63)) != 0;
#endif
}
@ -320,9 +324,34 @@ inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed subtraction resulted in an overflow.
inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
#if V8_HAS_BUILTIN_SUB_OVERFLOW
return __builtin_sub_overflow(lhs, rhs, val);
#else
uint64_t res = static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs);
*val = base::bit_cast<int64_t>(res);
return ((res ^ lhs) & (res ^ ~rhs) & (1ULL << 63)) != 0;
#endif
}
// SignedMulOverflow64(lhs,rhs,val) performs a signed multiplication of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed multiplication resulted in an overflow.
inline bool SignedMulOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
#if V8_HAS_BUILTIN_MUL_OVERFLOW
return __builtin_mul_overflow(lhs, rhs, val);
#else
int64_t res = base::bit_cast<int64_t>(static_cast<uint64_t>(lhs) *
static_cast<uint64_t>(rhs));
*val = res;
// Check for INT64_MIN / -1 as it's undefined behaviour and could cause
// hardware exceptions.
if ((res == INT64_MIN && lhs == -1)) {
return true;
}
return lhs != 0 && (res / lhs) != rhs;
#endif
}
// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and

View file

@ -28,7 +28,8 @@
#endif
// pthread_jit_write_protect is only available on arm64 Mac.
#if defined(V8_OS_MACOS) && defined(V8_HOST_ARCH_ARM64)
#if defined(V8_HOST_ARCH_ARM64) && \
(defined(V8_OS_MACOS) || (defined(V8_OS_IOS) && TARGET_OS_SIMULATOR))
#define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 1
#else
#define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 0
@ -70,9 +71,10 @@ constexpr int kPageSizeBits = 18;
// The minimal supported page size by the operation system. Any region aligned
// to that size needs to be individually protectable via
// {base::OS::SetPermission} and friends.
#if (defined(V8_OS_MACOS) && defined(V8_HOST_ARCH_ARM64)) || \
defined(V8_HOST_ARCH_LOONG64) || defined(V8_HOST_ARCH_MIPS64)
// MacOS on arm64 uses 16kB pages.
#if (defined(V8_OS_MACOS) && defined(V8_HOST_ARCH_ARM64)) || \
defined(V8_HOST_ARCH_LOONG64) || defined(V8_HOST_ARCH_MIPS64) || \
defined(V8_OS_IOS)
// MacOS & iOS on arm64 uses 16kB pages.
// LOONG64 and MIPS64 also use 16kB pages.
constexpr int kMinimumOSPageSize = 16 * 1024;
#elif defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID) && \

View file

@ -6,6 +6,7 @@
#define V8_BASE_CONTAINER_UTILS_H_
#include <algorithm>
#include <iterator>
#include <optional>
#include <vector>
@ -14,16 +15,16 @@ namespace v8::base {
// Returns true iff the {element} is found in the {container}.
template <typename C, typename T>
bool contains(const C& container, const T& element) {
const auto e = end(container);
return std::find(begin(container), e, element) != e;
const auto e = std::end(container);
return std::find(std::begin(container), e, element) != e;
}
// Returns the first index of {element} in {container}. Returns std::nullopt if
// {container} does not contain {element}.
template <typename C, typename T>
std::optional<size_t> index_of(const C& container, const T& element) {
const auto b = begin(container);
const auto e = end(container);
const auto b = std::begin(container);
const auto e = std::end(container);
if (auto it = std::find(b, e, element); it != e) {
return {std::distance(b, it)};
}
@ -34,8 +35,8 @@ std::optional<size_t> index_of(const C& container, const T& element) {
// {predicate}. Returns std::nullopt if no element satisfies {predicate}.
template <typename C, typename P>
std::optional<size_t> index_of_if(const C& container, const P& predicate) {
const auto b = begin(container);
const auto e = end(container);
const auto b = std::begin(container);
const auto e = std::end(container);
if (auto it = std::find_if(b, e, predicate); it != e) {
return {std::distance(b, it)};
}
@ -48,9 +49,9 @@ std::optional<size_t> index_of_if(const C& container, const P& predicate) {
template <typename C>
inline size_t erase_at(C& container, size_t index, size_t count = 1) {
// TODO(C++20): Replace with std::erase.
if (size(container) <= index) return 0;
auto start = begin(container) + index;
count = std::min<size_t>(count, std::distance(start, end(container)));
if (std::size(container) <= index) return 0;
auto start = std::begin(container) + index;
count = std::min<size_t>(count, std::distance(start, std::end(container)));
container.erase(start, start + count);
return count;
}
@ -60,43 +61,48 @@ inline size_t erase_at(C& container, size_t index, size_t count = 1) {
// TODO(C++20): Replace with std::erase_if.
template <typename C, typename P>
inline size_t erase_if(C& container, const P& predicate) {
size_t count = 0;
auto e = end(container);
for (auto it = begin(container); it != e;) {
it = std::find_if(it, e, predicate);
if (it == e) break;
it = container.erase(it);
e = end(container);
++count;
}
auto it =
std::remove_if(std::begin(container), std::end(container), predicate);
auto count = std::distance(it, std::end(container));
container.erase(it, std::end(container));
return count;
}
// Helper for std::count_if.
template <typename C, typename P>
inline size_t count_if(const C& container, const P& predicate) {
return std::count_if(begin(container), end(container), predicate);
return std::count_if(std::begin(container), std::end(container), predicate);
}
// Helper for std::all_of.
template <typename C, typename P>
inline bool all_of(const C& container, const P& predicate) {
return std::all_of(begin(container), end(container), predicate);
return std::all_of(std::begin(container), std::end(container), predicate);
}
// Helper for std::none_of.
template <typename C, typename P>
inline bool none_of(const C& container, const P& predicate) {
return std::none_of(begin(container), end(container), predicate);
return std::none_of(std::begin(container), std::end(container), predicate);
}
// Helper for std::sort.
template <typename C>
inline void sort(C& container) {
std::sort(std::begin(container), std::end(container));
}
template <typename C, typename Comp>
inline void sort(C& container, Comp comp) {
std::sort(std::begin(container), std::end(container), comp);
}
// Returns true iff all elements of {container} compare equal using operator==.
template <typename C>
inline bool all_equal(const C& container) {
if (size(container) <= 1) return true;
auto b = begin(container);
if (std::size(container) <= 1) return true;
auto b = std::begin(container);
const auto& value = *b;
return std::all_of(++b, end(container),
return std::all_of(++b, std::end(container),
[&](const auto& v) { return v == value; });
}
@ -104,15 +110,15 @@ inline bool all_equal(const C& container) {
// operator==.
template <typename C, typename T>
inline bool all_equal(const C& container, const T& value) {
return std::all_of(begin(container), end(container),
return std::all_of(std::begin(container), std::end(container),
[&](const auto& v) { return v == value; });
}
// Appends to vector {v} all the elements in the range {begin(container)} and
// {end(container)}.
template <typename T, typename A, typename C>
inline void vector_append(std::vector<T, A>& v, const C& container) {
v.insert(end(v), begin(container), end(container));
// Appends to vector {v} all the elements in the range {std::begin(container)}
// and {std::end(container)}.
template <typename V, typename C>
inline void vector_append(V& v, const C& container) {
v.insert(std::end(v), std::begin(container), std::end(container));
}
} // namespace v8::base

View file

@ -2,20 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_TORQUE_CONTEXTUAL_H_
#define V8_TORQUE_CONTEXTUAL_H_
#ifndef V8_BASE_CONTEXTUAL_H_
#define V8_BASE_CONTEXTUAL_H_
#include <type_traits>
#include "src/base/export-template.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
namespace v8 {
namespace internal {
namespace torque {
template <class Variable>
V8_EXPORT_PRIVATE typename Variable::Scope*& ContextualVariableTop();
namespace v8::base {
// {ContextualVariable} provides a clean alternative to a global variable.
// The contextual variable is mutable, and supports managing the value of
@ -28,8 +24,10 @@ V8_EXPORT_PRIVATE typename Variable::Scope*& ContextualVariableTop();
// Note that contextual variables must only be used from the same thread,
// i.e. {Scope} and Get() have to be in the same thread.
template <class Derived, class VarType>
class ContextualVariable {
class V8_EXPORT_PRIVATE ContextualVariable {
public:
using VarT = VarType;
// A {Scope} contains a new object of type {VarType} and gives
// ContextualVariable::Get() access to it. Upon destruction, the contextual
// variable is restored to the state before the {Scope} was created. Scopes
@ -47,6 +45,7 @@ class ContextualVariable {
DCHECK_EQ(this, Top());
Top() = previous_;
}
Scope(const Scope&) = delete;
Scope& operator=(const Scope&) = delete;
@ -62,32 +61,42 @@ class ContextualVariable {
DISALLOW_NEW_AND_DELETE()
};
// Access the most recent active {Scope}. There has to be an active {Scope}
// for this contextual variable.
static VarType& Get() {
DCHECK_NOT_NULL(Top());
DCHECK(HasScope());
return Top()->Value();
}
private:
template <class T>
friend V8_EXPORT_PRIVATE typename T::Scope*& ContextualVariableTop();
static Scope*& Top() { return ContextualVariableTop<Derived>(); }
static bool HasScope() { return Top() != nullptr; }
friend class MessageBuilder;
private:
inline static thread_local Scope* top_ = nullptr;
// If there is a linking error for `Top()`, then the contextual variable
// probably needs to be exported using EXPORT_CONTEXTUAL_VARIABLE.
#if defined(USING_V8_SHARED)
// Hide the definition from other DLLs/libraries to avoid access to `top_`,
// since access to thread_local variables from other DLLs/libraries does not
// work correctly.
static Scope*& Top();
#else
static Scope*& Top() { return top_; }
#endif
};
// Usage: DECLARE_CONTEXTUAL_VARIABLE(VarName, VarType)
#define DECLARE_CONTEXTUAL_VARIABLE(VarName, ...) \
struct VarName \
: v8::internal::torque::ContextualVariable<VarName, __VA_ARGS__> {}
struct VarName : ::v8::base::ContextualVariable<VarName, __VA_ARGS__> {}
#define DEFINE_CONTEXTUAL_VARIABLE(VarName) \
template <> \
V8_EXPORT_PRIVATE VarName::Scope*& ContextualVariableTop<VarName>() { \
static thread_local VarName::Scope* top = nullptr; \
return top; \
// Contextual variables that are accessed in tests need to be
// exported. For this, place the following macro in the global namespace inside
// of a .cc file.
#define EXPORT_CONTEXTUAL_VARIABLE(VarName) \
namespace v8::base { \
template <> \
V8_EXPORT_PRIVATE typename VarName::Scope*& \
ContextualVariable<VarName, typename VarName::VarT>::Top() { \
return top_; \
} \
}
// By inheriting from {ContextualClass} a class can become a contextual variable
@ -95,8 +104,6 @@ class ContextualVariable {
template <class T>
using ContextualClass = ContextualVariable<T, T>;
} // namespace torque
} // namespace internal
} // namespace v8
} // namespace v8::base
#endif // V8_TORQUE_CONTEXTUAL_H_
#endif // V8_BASE_CONTEXTUAL_H_

View file

@ -404,6 +404,8 @@ CPU::CPU()
has_vfp3_(false),
has_vfp3_d32_(false),
has_jscvt_(false),
has_dot_prod_(false),
has_lse_(false),
is_fp64_mode_(false),
has_non_stop_time_stamp_counter_(false),
is_running_in_vm_(false),
@ -726,20 +728,29 @@ CPU::CPU()
#if !defined(PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE)
constexpr int PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE = 44;
#endif
#if !defined(PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE)
constexpr int PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE = 43;
#endif
has_jscvt_ =
IsProcessorFeaturePresent(PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE);
has_dot_prod_ =
IsProcessorFeaturePresent(PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE);
#elif V8_OS_LINUX
// Try to extract the list of CPU features from ELF hwcaps.
uint32_t hwcaps = ReadELFHWCaps();
if (hwcaps != 0) {
has_jscvt_ = (hwcaps & HWCAP_JSCVT) != 0;
has_dot_prod_ = (hwcaps & HWCAP_ASIMDDP) != 0;
has_lse_ = (hwcaps & HWCAP_ATOMICS) != 0;
} else {
// Try to fallback to "Features" CPUInfo field
CPUInfo cpu_info;
char* features = cpu_info.ExtractField("Features");
has_jscvt_ = HasListItem(features, "jscvt");
has_dot_prod_ = HasListItem(features, "asimddp");
has_lse_ = HasListItem(features, "atomics");
delete[] features;
}
#elif V8_OS_DARWIN
@ -752,9 +763,27 @@ CPU::CPU()
} else {
has_jscvt_ = feat_jscvt;
}
int64_t feat_dot_prod = 0;
size_t feat_dot_prod_size = sizeof(feat_dot_prod);
if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &feat_dot_prod,
&feat_dot_prod_size, nullptr, 0) == -1) {
has_dot_prod_ = false;
} else {
has_dot_prod_ = feat_dot_prod;
}
int64_t feat_lse = 0;
size_t feat_lse_size = sizeof(feat_lse);
if (sysctlbyname("hw.optional.arm.FEAT_LSE", &feat_lse, &feat_lse_size,
nullptr, 0) == -1) {
has_lse_ = false;
} else {
has_lse_ = feat_lse;
}
#else
// ARM64 Macs always have JSCVT.
// ARM64 Macs always have JSCVT, ASIMDDP and LSE.
has_jscvt_ = true;
has_dot_prod_ = true;
has_lse_ = true;
#endif // V8_OS_IOS
#endif // V8_OS_WIN

View file

@ -123,6 +123,8 @@ class V8_BASE_EXPORT CPU final {
bool has_vfp3() const { return has_vfp3_; }
bool has_vfp3_d32() const { return has_vfp3_d32_; }
bool has_jscvt() const { return has_jscvt_; }
bool has_dot_prod() const { return has_dot_prod_; }
bool has_lse() const { return has_lse_; }
// mips features
bool is_fp64_mode() const { return is_fp64_mode_; }
@ -176,6 +178,8 @@ class V8_BASE_EXPORT CPU final {
bool has_vfp3_;
bool has_vfp3_d32_;
bool has_jscvt_;
bool has_dot_prod_;
bool has_lse_;
bool is_fp64_mode_;
bool has_non_stop_time_stamp_counter_;
bool is_running_in_vm_;

View file

@ -105,9 +105,9 @@ namespace {
} while (false)
int32_t __ieee754_rem_pio2(double x, double* y) V8_WARN_UNUSED_RESULT;
double __kernel_cos(double x, double y) V8_WARN_UNUSED_RESULT;
int __kernel_rem_pio2(double* x, double* y, int e0, int nx, int prec,
const int32_t* ipio2) V8_WARN_UNUSED_RESULT;
double __kernel_cos(double x, double y) V8_WARN_UNUSED_RESULT;
double __kernel_sin(double x, double y, int iy) V8_WARN_UNUSED_RESULT;
/* __ieee754_rem_pio2(x,y)
@ -1348,7 +1348,11 @@ double atan2(double y, double x) {
* Accuracy:
* TRIG(x) returns trig(x) nearly rounded
*/
#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
double fdlibm_cos(double x) {
#else
double cos(double x) {
#endif
double y[2], z = 0.0;
int32_t n, ix;
@ -2440,7 +2444,11 @@ double cbrt(double x) {
* Accuracy:
* TRIG(x) returns trig(x) nearly rounded
*/
#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
double fdlibm_sin(double x) {
#else
double sin(double x) {
#endif
double y[2], z = 0.0;
int32_t n, ix;
@ -3015,6 +3023,11 @@ double tanh(double x) {
#undef SET_HIGH_WORD
#undef SET_LOW_WORD
#if defined(V8_USE_LIBM_TRIG_FUNCTIONS) && defined(BUILDING_V8_BASE_SHARED)
double libm_sin(double x) { return glibc_sin(x); }
double libm_cos(double x) { return glibc_cos(x); }
#endif
} // namespace ieee754
} // namespace base
} // namespace v8

View file

@ -7,6 +7,10 @@
#include "src/base/base-export.h"
#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
#include "third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h" // nogncheck
#endif
namespace v8 {
namespace base {
namespace ieee754 {
@ -33,8 +37,24 @@ V8_BASE_EXPORT double atan(double x);
// the two arguments to determine the quadrant of the result.
V8_BASE_EXPORT double atan2(double y, double x);
// Returns the cosine of |x|, where |x| is given in radians.
#if defined(V8_USE_LIBM_TRIG_FUNCTIONS)
// To ensure there aren't problems with libm's sin/cos, both implementations
// are shipped. The plan is to transition to libm once we ensure there are no
// compatibility or performance issues.
V8_BASE_EXPORT double fdlibm_sin(double x);
V8_BASE_EXPORT double fdlibm_cos(double x);
#if !defined(BUILDING_V8_BASE_SHARED) && !defined(USING_V8_BASE_SHARED)
inline double libm_sin(double x) { return glibc_sin(x); }
inline double libm_cos(double x) { return glibc_cos(x); }
#else
V8_BASE_EXPORT double libm_sin(double x);
V8_BASE_EXPORT double libm_cos(double x);
#endif
#else
V8_BASE_EXPORT double cos(double x);
V8_BASE_EXPORT double sin(double x);
#endif
// Returns the base-e exponential of |x|.
V8_BASE_EXPORT double exp(double x);
@ -68,9 +88,6 @@ V8_BASE_EXPORT double expm1(double x);
// behaviour is preserved for compatibility reasons.
V8_BASE_EXPORT double pow(double x, double y);
// Returns the sine of |x|, where |x| is given in radians.
V8_BASE_EXPORT double sin(double x);
// Returns the tangent of |x|, where |x| is given in radians.
V8_BASE_EXPORT double tan(double x);

View file

@ -140,9 +140,9 @@
[] { TRAP_SEQUENCE_(); }(); \
} while (false)
#endif // !V8_CC_GCC
#endif // !V8_CC_GNU
#if defined(__clang__) || V8_CC_GCC
#if defined(__clang__) || V8_CC_GNU
// __builtin_unreachable() hints to the compiler that this is noreturn and can
// be packed in the function epilogue.

31
deps/v8/src/base/ios-headers.h vendored Normal file
View file

@ -0,0 +1,31 @@
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_IOS_HEADERS_H_
#define V8_BASE_IOS_HEADERS_H_
// This file includes the necessary headers that are not part of the
// iOS public SDK in order to support memory allocation on iOS.
#include <mach/mach.h>
#include <mach/vm_map.h>
__BEGIN_DECLS
kern_return_t mach_vm_remap(
vm_map_t target_task, mach_vm_address_t* target_address,
mach_vm_size_t size, mach_vm_offset_t mask, int flags, vm_map_t src_task,
mach_vm_address_t src_address, boolean_t copy, vm_prot_t* cur_protection,
vm_prot_t* max_protection, vm_inherit_t inheritance);
kern_return_t mach_vm_map(vm_map_t target_task, mach_vm_address_t* address,
mach_vm_size_t size, mach_vm_offset_t mask, int flags,
mem_entry_name_port_t object,
memory_object_offset_t offset, boolean_t copy,
vm_prot_t cur_protection, vm_prot_t max_protection,
vm_inherit_t inheritance);
__END_DECLS
#endif // V8_BASE_IOS_HEADERS_H_

View file

@ -46,8 +46,14 @@ V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line,
#endif // !defined(OFFICIAL_BUILD)
#endif // DEBUG
#define UNIMPLEMENTED() FATAL("unimplemented code")
#define UNREACHABLE() FATAL("unreachable code")
namespace v8::base {
// These string constants are pattern-matched by fuzzers.
constexpr const char* kUnimplementedCodeMessage = "unimplemented code";
constexpr const char* kUnreachableCodeMessage = "unreachable code";
} // namespace v8::base
#define UNIMPLEMENTED() FATAL(::v8::base::kUnimplementedCodeMessage)
#define UNREACHABLE() FATAL(::v8::base::kUnreachableCodeMessage)
// g++ versions <= 8 cannot use UNREACHABLE() in a constexpr function.
// TODO(miladfarca): Remove once all compilers handle this properly.
#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ <= 8)

View file

@ -48,9 +48,18 @@ class DiyFp {
// returns a * b;
static DiyFp Times(const DiyFp& a, const DiyFp& b) {
#ifdef __SIZEOF_INT128__
// If we have compiler-assisted 64x64 -> 128 muls (e.g. x86-64 and
// aarch64), we can use that for a faster, inlined implementation.
// This rounds the same way as Multiply().
uint64_t hi = (a.f_ * static_cast<unsigned __int128>(b.f_)) >> 64;
uint64_t lo = (a.f_ * static_cast<unsigned __int128>(b.f_));
return {hi + (lo >> 63), a.e_ + b.e_ + 64};
#else
DiyFp result = a;
result.Multiply(b);
return result;
#endif
}
void Normalize() {

View file

@ -134,10 +134,9 @@ class Double {
void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
DCHECK_GT(value(), 0.0);
DiyFp v = this->AsDiyFp();
bool significand_is_zero = (v.f() == kHiddenBit);
DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
DiyFp m_minus;
if (significand_is_zero && v.e() != kDenormalExponent) {
if ((AsUint64() & kSignificandMask) == 0 && v.e() != kDenormalExponent) {
// The boundary is closer. Think of v = 1000e10 and v- = 9999e9.
// Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
// at a distance of 1e8.

View file

@ -39,9 +39,9 @@ static const int kMaximalTargetExponent = -32;
// Output: returns true if the buffer is guaranteed to contain the closest
// representable number to the input.
// Modifies the generated digits in the buffer to approach (round towards) w.
static bool RoundWeed(Vector<char> buffer, int length,
uint64_t distance_too_high_w, uint64_t unsafe_interval,
uint64_t rest, uint64_t ten_kappa, uint64_t unit) {
static bool RoundWeed(char* last_digit, uint64_t distance_too_high_w,
uint64_t unsafe_interval, uint64_t rest,
uint64_t ten_kappa, uint64_t unit) {
uint64_t small_distance = distance_too_high_w - unit;
uint64_t big_distance = distance_too_high_w + unit;
// Let w_low = too_high - big_distance, and
@ -120,7 +120,7 @@ static bool RoundWeed(Vector<char> buffer, int length,
unsafe_interval - rest >= ten_kappa && // Negated condition 2
(rest + ten_kappa < small_distance || // buffer{-1} > w_high
small_distance - rest >= rest + ten_kappa - small_distance)) {
buffer[length - 1]--;
--*last_digit;
rest += ten_kappa;
}
@ -200,13 +200,62 @@ static const uint32_t kTen7 = 10000000;
static const uint32_t kTen8 = 100000000;
static const uint32_t kTen9 = 1000000000;
struct DivMagic {
uint32_t mul;
uint32_t shift;
};
// This table was computed by libdivide. Essentially, the shift is
// floor(log2(x)), and the mul is 2^(33 + shift) / x, rounded up and truncated
// to 32 bits.
static const DivMagic div[] = {
{0, 0}, // Not used, since 1 is not supported by the algorithm.
{0x9999999a, 3}, // 10
{0x47ae147b, 6}, // 100
{0x0624dd30, 9}, // 1000
{0xa36e2eb2, 13}, // 10000
{0x4f8b588f, 16}, // 100000
{0x0c6f7a0c, 19}, // 1000000
{0xad7f29ac, 23}, // 10000000
{0x5798ee24, 26} // 100000000
};
// Returns *val / divisor, and does *val %= divisor. d must be the DivMagic
// corresponding to the divisor.
//
// This algorithm is exactly the same as libdivide's branch-free u32 algorithm,
// except that we add back a branch anyway to support 1.
//
// GCC/Clang uses a slightly different algorithm that doesn't need
// the extra rounding step (and that would allow us to do 1 without
// a branch), but it requires a pre-shift for the case of 10000,
// so it ends up slower, at least on x86-64.
//
// Note that this is actually a small loss for certain CPUs with
// a very fast divider (e.g. Zen 3), but a significant win for most
// others (including the entire Skylake family).
static inline uint32_t fast_divmod(uint32_t* val, uint32_t divisor,
const DivMagic& d) {
if (divisor == 1) {
uint32_t digit = *val;
*val = 0;
return digit;
} else {
uint32_t q = (static_cast<uint64_t>(*val) * d.mul) >> 32;
uint32_t t = ((*val - q) >> 1) + q;
uint32_t digit = t >> d.shift;
*val -= digit * divisor;
return digit;
}
}
// Returns the biggest power of ten that is less than or equal than the given
// number. We furthermore receive the maximum number of bits 'number' has.
// If number_bits == 0 then 0^-1 is returned
// The number of bits must be <= 32.
// Precondition: number < (1 << (number_bits + 1)).
static void BiggestPowerTen(uint32_t number, int number_bits, uint32_t* power,
int* exponent) {
static inline void BiggestPowerTen(uint32_t number, int number_bits,
uint32_t* power, unsigned* exponent) {
switch (number_bits) {
case 32:
case 31:
@ -354,8 +403,8 @@ static void BiggestPowerTen(uint32_t number, int number_bits, uint32_t* power,
// represent 'w' we can stop. Everything inside the interval low - high
// represents w. However we have to pay attention to low, high and w's
// imprecision.
static bool DigitGen(DiyFp low, DiyFp w, DiyFp high, Vector<char> buffer,
int* length, int* kappa) {
static bool DigitGen(DiyFp low, DiyFp w, DiyFp high, char** outptr,
int* kappa) {
DCHECK(low.e() == w.e() && w.e() == high.e());
DCHECK(low.f() + 1 <= high.f() - 1);
DCHECK(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
@ -389,20 +438,18 @@ static bool DigitGen(DiyFp low, DiyFp w, DiyFp high, Vector<char> buffer,
// Modulo by one is an and.
uint64_t fractionals = too_high.f() & (one.f() - 1);
uint32_t divisor;
int divisor_exponent;
unsigned divisor_exponent;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()), &divisor,
&divisor_exponent);
*kappa = divisor_exponent + 1;
*length = 0;
// Loop invariant: buffer = too_high / 10^kappa (integer division)
// The invariant holds for the first iteration: kappa has been initialized
// with the divisor exponent + 1. And the divisor is the biggest power of ten
// that is smaller than integrals.
while (*kappa > 0) {
int digit = integrals / divisor;
buffer[*length] = '0' + digit;
(*length)++;
integrals %= divisor;
uint32_t digit = fast_divmod(&integrals, divisor, div[divisor_exponent]);
**outptr = '0' + digit;
(*outptr)++;
(*kappa)--;
// Note that kappa now equals the exponent of the divisor and that the
// invariant thus holds again.
@ -413,11 +460,17 @@ static bool DigitGen(DiyFp low, DiyFp w, DiyFp high, Vector<char> buffer,
if (rest < unsafe_interval.f()) {
// Rounding down (by not emitting the remaining digits) yields a number
// that lies within the unsafe interval.
return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
return RoundWeed(*outptr - 1, DiyFp::Minus(too_high, w).f(),
unsafe_interval.f(), rest,
static_cast<uint64_t>(divisor) << -one.e(), unit);
}
if (*kappa <= 0) {
// Don't bother doing the division below. (The compiler ought to
// figure this out itself, but it doesn't.)
break;
}
divisor /= 10;
--divisor_exponent;
}
// The integrals have been generated. We are at the point of the decimal
@ -435,12 +488,12 @@ static bool DigitGen(DiyFp low, DiyFp w, DiyFp high, Vector<char> buffer,
unsafe_interval.set_f(unsafe_interval.f() * 10);
// Integer division by one.
int digit = static_cast<int>(fractionals >> -one.e());
buffer[*length] = '0' + digit;
(*length)++;
**outptr = '0' + digit;
(*outptr)++;
fractionals &= one.f() - 1; // Modulo by one.
(*kappa)--;
if (fractionals < unsafe_interval.f()) {
return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
return RoundWeed(*outptr - 1, DiyFp::Minus(too_high, w).f() * unit,
unsafe_interval.f(), fractionals, one.f(), unit);
}
}
@ -492,7 +545,7 @@ static bool DigitGenCounted(DiyFp w, int requested_digits, Vector<char> buffer,
// Modulo by one is an and.
uint64_t fractionals = w.f() & (one.f() - 1);
uint32_t divisor;
int divisor_exponent;
unsigned divisor_exponent;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()), &divisor,
&divisor_exponent);
*kappa = divisor_exponent + 1;
@ -503,16 +556,16 @@ static bool DigitGenCounted(DiyFp w, int requested_digits, Vector<char> buffer,
// with the divisor exponent + 1. And the divisor is the biggest power of ten
// that is smaller than 'integrals'.
while (*kappa > 0) {
int digit = integrals / divisor;
uint32_t digit = fast_divmod(&integrals, divisor, div[divisor_exponent]);
buffer[*length] = '0' + digit;
(*length)++;
requested_digits--;
integrals %= divisor;
(*kappa)--;
// Note that kappa now equals the exponent of the divisor and that the
// invariant thus holds again.
if (requested_digits == 0) break;
divisor /= 10;
--divisor_exponent;
}
if (requested_digits == 0) {
@ -559,8 +612,7 @@ static bool DigitGenCounted(DiyFp w, int requested_digits, Vector<char> buffer,
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the closest will be
// computed.
static bool Grisu3(double v, Vector<char> buffer, int* length,
int* decimal_exponent) {
static bool Grisu3(double v, char** outptr, int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
// boundary_minus and boundary_plus are the boundaries between v and its
// closest floating-point neighbors. Any number strictly between
@ -610,7 +662,7 @@ static bool Grisu3(double v, Vector<char> buffer, int* length,
// decreased by 2.
int kappa;
bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
buffer, length, &kappa);
outptr, &kappa);
*decimal_exponent = -mk + kappa;
return result;
}
@ -665,15 +717,20 @@ bool FastDtoa(double v, FastDtoaMode mode, int requested_digits,
DCHECK(!Double(v).IsSpecial());
bool result = false;
char* outptr = buffer.data();
int decimal_exponent = 0;
switch (mode) {
case FAST_DTOA_SHORTEST:
result = Grisu3(v, buffer, length, &decimal_exponent);
result = Grisu3(v, &outptr, &decimal_exponent);
*length = static_cast<int>(outptr - buffer.data());
break;
case FAST_DTOA_PRECISION:
result =
Grisu3Counted(v, requested_digits, buffer, length, &decimal_exponent);
case FAST_DTOA_PRECISION: {
int local_length = 0;
result = Grisu3Counted(v, requested_digits, buffer, &local_length,
&decimal_exponent);
*length = local_length;
break;
}
default:
UNREACHABLE();
}

View file

@ -126,8 +126,8 @@ inline size_t MallocUsableSize(void* ptr) {
// Mimics C++23 `allocation_result`.
template <class Pointer>
struct AllocationResult {
Pointer ptr;
size_t count;
Pointer ptr = nullptr;
size_t count = 0;
};
// Allocates at least `n * sizeof(T)` uninitialized storage but may allocate

View file

@ -136,7 +136,7 @@ std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
}
// static
Stack::StackSlot Stack::GetStackStart() {
Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
// pthread_getthrds_np creates 3 values:
// __pi_stackaddr, __pi_stacksize, __pi_stackend

View file

@ -2,9 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Platform-specific code shared between macOS and iOS goes here. The macOS
// specific part is in platform-macos.cc, the POSIX-compatible parts in
// platform-posix.cc.
// Platform-specific code shared between macOS and iOS goes here. The
// POSIX-compatible parts in platform-posix.cc.
#include <AvailabilityMacros.h>
#include <dlfcn.h>
@ -16,6 +15,7 @@
#include <mach/mach_init.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <mach/vm_map.h>
#include <mach/vm_statistics.h>
#include <pthread.h>
#include <semaphore.h>
@ -39,23 +39,60 @@
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"
#if defined(V8_TARGET_OS_IOS)
#include "src/base/ios-headers.h"
#else
#include <mach/mach_vm.h>
#endif
namespace v8 {
namespace base {
namespace {
// Translates a V8 OS::MemoryPermission value into the equivalent Mach
// vm_prot_t protection bits consumed by the mach_vm_* APIs. Both kNoAccess
// variants map to VM_PROT_NONE; the distinction between them matters only
// to callers, not to the kernel protection bits.
vm_prot_t GetVMProtFromMemoryPermission(OS::MemoryPermission access) {
  if (access == OS::MemoryPermission::kNoAccess ||
      access == OS::MemoryPermission::kNoAccessWillJitLater) {
    return VM_PROT_NONE;
  }
  if (access == OS::MemoryPermission::kRead) {
    return VM_PROT_READ;
  }
  if (access == OS::MemoryPermission::kReadWrite) {
    return VM_PROT_READ | VM_PROT_WRITE;
  }
  if (access == OS::MemoryPermission::kReadWriteExecute) {
    return VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
  }
  if (access == OS::MemoryPermission::kReadExecute) {
    return VM_PROT_READ | VM_PROT_EXECUTE;
  }
  // All enumerators are handled above; reaching here is a programming error.
  UNREACHABLE();
}
// Thin convenience wrapper around mach_vm_map(): maps |size| bytes of the
// memory object |port| (starting at |offset|) into the current task at
// *|address|. Current and maximum protection are both set to |prot|, the
// mapping is not copied and is not inherited by child tasks.
kern_return_t mach_vm_map_wrapper(mach_vm_address_t* address,
                                  mach_vm_size_t size, int flags,
                                  mach_port_t port,
                                  memory_object_offset_t offset,
                                  vm_prot_t prot) {
  const vm_prot_t current_prot = prot;
  const vm_prot_t maximum_prot = prot;
  return mach_vm_map(mach_task_self(), address, size, /*mask=*/0, flags, port,
                     offset, /*copy=*/FALSE, current_prot, maximum_prot,
                     VM_INHERIT_NONE);
}
} // namespace
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
unsigned int images_count = _dyld_image_count();
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
if (header == nullptr) continue;
unsigned long size;
#if V8_HOST_ARCH_I32
unsigned int size;
char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
uint8_t* code_ptr = getsectiondata(header, SEG_TEXT, SECT_TEXT, &size);
#else
uint64_t size;
char* code_ptr = getsectdatafromheader_64(
reinterpret_cast<const mach_header_64*>(header), SEG_TEXT, SECT_TEXT,
&size);
const mach_header_64* header64 =
reinterpret_cast<const mach_header_64*>(header);
uint8_t* code_ptr = getsectiondata(header64, SEG_TEXT, SECT_TEXT, &size);
#endif
if (code_ptr == nullptr) continue;
const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
@ -99,9 +136,105 @@ std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
}
// static
Stack::StackSlot Stack::GetStackStart() {
Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
return pthread_get_stackaddr_np(pthread_self());
}
// static
// Creates a fresh Mach memory entry of |size| bytes for use by tests and
// wraps it as a PlatformSharedMemoryHandle. Returns
// kInvalidSharedMemoryHandle if the kernel rejects the request.
PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
mach_vm_size_t vm_size = size;
mach_port_t port;
// MAP_MEM_NAMED_CREATE makes the kernel create a new memory object (rather
// than referencing an existing mapping); the entry allows read/write access.
kern_return_t kr = mach_make_memory_entry_64(
mach_task_self(), &vm_size, 0,
MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port,
MACH_PORT_NULL);
if (kr != KERN_SUCCESS) return kInvalidSharedMemoryHandle;
return SharedMemoryHandleFromMachMemoryEntry(port);
}
// static
// Releases a shared memory handle previously produced by
// CreateSharedMemoryHandleForTesting by deallocating the underlying Mach
// memory-entry port. |handle| must be valid; deallocation failure is fatal.
void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
DCHECK_NE(kInvalidSharedMemoryHandle, handle);
mach_port_t port = MachMemoryEntryFromSharedMemoryHandle(handle);
CHECK_EQ(KERN_SUCCESS, mach_port_deallocate(mach_task_self(), port));
}
// static
// Maps the shared memory object |handle| (at |offset|) into this process
// with the protection implied by |access|. |hint| is a preferred address:
// the first attempt asks for exactly that address (VM_FLAGS_FIXED) and, if
// the kernel refuses, a second attempt lets it pick any address
// (VM_FLAGS_ANYWHERE). Returns nullptr if both attempts fail.
// |size| must be a multiple of the allocation page size.
void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
PlatformSharedMemoryHandle handle, uint64_t offset) {
DCHECK_EQ(0, size % AllocatePageSize());
mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(hint);
vm_prot_t prot = GetVMProtFromMemoryPermission(access);
mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
kern_return_t kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED,
shared_mem_port, offset, prot);
if (kr != KERN_SUCCESS) {
// Retry without hint.
kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_ANYWHERE, shared_mem_port,
offset, prot);
}
if (kr != KERN_SUCCESS) return nullptr;
return reinterpret_cast<void*>(addr);
}
// static
// Makes |new_address| an alias of the |size| bytes mapped at |address| with
// protection |access|, replacing whatever mapping currently exists at
// |new_address|. All three of |address|, |new_address| and |size| must be
// allocation-page aligned. Returns false if the kernel call fails.
bool OS::RemapPages(const void* address, size_t size, void* new_address,
MemoryPermission access) {
DCHECK(IsAligned(reinterpret_cast<uintptr_t>(address), AllocatePageSize()));
DCHECK(
IsAligned(reinterpret_cast<uintptr_t>(new_address), AllocatePageSize()));
DCHECK(IsAligned(size, AllocatePageSize()));
vm_prot_t cur_protection = GetVMProtFromMemoryPermission(access);
// max_protection is an out-parameter filled in by mach_vm_remap.
vm_prot_t max_protection;
// Asks the kernel to remap *on top* of an existing mapping, rather than
// copying the data.
int flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
mach_vm_address_t target = reinterpret_cast<mach_vm_address_t>(new_address);
kern_return_t ret =
mach_vm_remap(mach_task_self(), &target, size, 0, flags, mach_task_self(),
reinterpret_cast<mach_vm_address_t>(address), FALSE,
&cur_protection, &max_protection, VM_INHERIT_NONE);
if (ret != KERN_SUCCESS) return false;
// Did we get the address we wanted?
CHECK_EQ(new_address, reinterpret_cast<void*>(target));
return true;
}
// Maps the shared memory object |handle| (at |offset|) to the fixed
// |address| inside this reservation, overwriting the placeholder mapping
// that the reservation holds there. Unlike OS::AllocateShared there is no
// fallback address: the region must lie within the reservation.
// Returns true on success.
bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
OS::MemoryPermission access,
PlatformSharedMemoryHandle handle,
uint64_t offset) {
DCHECK(Contains(address, size));
vm_prot_t prot = GetVMProtFromMemoryPermission(access);
mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(address);
mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
// VM_FLAGS_OVERWRITE replaces the reservation's existing mapping in place.
kern_return_t kr =
mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
shared_mem_port, offset, prot);
return kr == KERN_SUCCESS;
}
// See platform-ios.cc for the iOS implementation.
#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT && !defined(V8_OS_IOS)
// Ignoring this warning is considered better than relying on
// __builtin_available.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability-new"
// Toggles per-thread write protection of MAP_JIT pages: a non-zero |enable|
// makes JIT regions execute-only for this thread, zero makes them writable.
V8_BASE_EXPORT void SetJitWriteProtected(int enable) {
pthread_jit_write_protect_np(enable);
}
#pragma clang diagnostic pop
#endif
} // namespace base
} // namespace v8

View file

@ -104,7 +104,7 @@ std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
}
// static
Stack::StackSlot Stack::GetStackStart() {
Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
pthread_attr_t attr;
int error;
pthread_attr_init(&attr);

View file

@ -293,9 +293,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
}
void OS::SetDataReadOnly(void* address, size_t size) {
// TODO(v8:13194): Figure out which API to use on fuchsia. {vmar.protect}
// fails.
// CHECK(OS::SetPermissions(address, size, MemoryPermission::kRead));
CHECK(OS::SetPermissions(address, size, MemoryPermission::kRead));
}
// static

Some files were not shown because too many files have changed in this diff Show more