deps: update V8 to 13.7.152.9
PR-URL: https://github.com/nodejs/node/pull/58064
Reviewed-By: Antoine du Hamel <duhamelantoine1995@gmail.com>
Reviewed-By: Richard Lau <rlau@redhat.com>
Reviewed-By: Darshan Sen <raisinten@gmail.com>
Reviewed-By: Marco Ippolito <marcoippolito54@gmail.com>
Reviewed-By: Paolo Insogna <paolo@cowtech.it>
Reviewed-By: Yagiz Nizipli <yagiz@nizipli.com>
Commit: fff0d1554d (parent: ccf227eac8)
1263 changed files with 35246 additions and 19433 deletions

deps/v8/.git-blame-ignore-revs (vendored) | 3
@@ -85,6 +85,9 @@ e50b49a0e38b34e2b28e026f4d1c7e0da0c7bb1a
 # Rewrite code base to use "." instead of "->" to access Object members.
 878ccb33bd3cf0e6dc018ff8d15843f585ac07be
 
+# Rewrite code base to use "->" instead of "." to access Object members.
+95532da70de14206e64060647082766a293f81cb
+
 # Splitting src/ into subfolders
 632239011db501e76475d82ff6492f37fa8c1edc
 f455f86d899716df3b9550950ce172f5b867619a

deps/v8/.gn (vendored) | 5
@@ -30,10 +30,13 @@ default_args = {
 
   # Disable rust dependencies.
   enable_rust = true
+
+  # Needed only for std::atomic_ref<T> for large Ts http://crbug.com/402171653
+  use_llvm_libatomic = false
 }
 
 # These are the list of GN files that run exec_script. This whitelist exists
 # to force additional review for new uses of exec_script, which is strongly
 # discouraged except for gypi_to_gn calls.
-exec_script_whitelist = build_dotfile_settings.exec_script_whitelist +
+exec_script_allowlist = build_dotfile_settings.exec_script_allowlist +
                         [ "//build_overrides/build.gni" ]

deps/v8/.vpython3 (vendored) | 17
@@ -22,7 +22,7 @@
 # Read more about `vpython` and how to modify this file here:
 # https://chromium.googlesource.com/infra/infra/+/main/doc/users/vpython.md
 
-python_version: "3.8"
+python_version: "3.11"
 
 # The default set of platforms vpython checks does not yet include mac-arm64.
 # Setting `verify_pep425_tag` to the list of platforms we explicitly must support
@@ -47,7 +47,7 @@ wheel: <
 
 wheel: <
   name: "infra/python/wheels/coverage/${vpython_platform}"
-  version: "version:5.5.chromium.3"
+  version: "version:7.3.1"
 >
 
 wheel: <
@@ -55,6 +55,11 @@ wheel: <
   version: "version:3.0.0"
 >
 
+wheel: <
+  name: "infra/python/wheels/filecheck-py2_py3"
+  version: "version:1.0.1"
+>
+
 wheel: <
   name: "infra/python/wheels/funcsigs-py2_py3"
   version: "version:1.0.2"
@@ -67,7 +72,7 @@ wheel: <
 
 wheel: <
   name: "infra/python/wheels/numpy/${vpython_platform}"
-  version: "version:1.2x.supported.1"
+  version: "version:1.23.5.chromium.4"
 >
 
 wheel: <
@@ -97,6 +102,6 @@ wheel: <
   version: "version:2.0.4"
 >
 wheel: <
-  name: "infra/python/wheels/pyfakefs-py2_py3"
-  version: "version:3.7.2"
->
+  name: "infra/python/wheels/pyfakefs-py3"
+  version: "version:5.7.3"
+>

deps/v8/AUTHORS (vendored) | 1
@@ -334,3 +334,4 @@ Kotaro Ohsugi <dec4m4rk@gmail.com>
 Jing Peiyang <jingpeiyang@eswincomputing.com>
 magic-akari <akari.ccino@gmail.com>
 Ryuhei Shima <shimaryuhei@gmail.com>
+Domagoj Stolfa <domagoj.stolfa@gmail.com>

deps/v8/BUILD.bazel (vendored) | 41
@@ -41,7 +41,6 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression")
 # v8_enable_trace_feedback_updates
 # v8_enable_atomic_object_field_writes
 # v8_enable_concurrent_marking
-# v8_enable_conservative_stack_scanning
 # v8_enable_direct_handle
 # v8_enable_local_off_stack_check
 # v8_enable_ignition_dispatch_counting
@@ -671,6 +670,7 @@ filegroup(
         "include/cppgc/process-heap-statistics.h",
         "include/cppgc/sentinel-pointer.h",
         "include/cppgc/source-location.h",
+        "include/cppgc/tagged-member.h",
         "include/cppgc/trace-trait.h",
        "include/cppgc/type-traits.h",
         "include/cppgc/visitor.h",
@@ -685,6 +685,7 @@ filegroup(
         "include/v8-callbacks.h",
         "include/v8-container.h",
         "include/v8-context.h",
+        "include/v8-cpp-heap-external.h",
         "include/v8-cppgc.h",
         "include/v8-data.h",
         "include/v8-date.h",
@@ -831,6 +832,25 @@ filegroup(
         "src/base/numbers/fixed-dtoa.h",
         "src/base/numbers/strtod.cc",
         "src/base/numbers/strtod.h",
+        "src/base/numerics/angle_conversions.h",
+        "src/base/numerics/basic_ops_impl.h",
+        "src/base/numerics/byte_conversions.h",
+        "src/base/numerics/checked_math.h",
+        "src/base/numerics/checked_math_impl.h",
+        "src/base/numerics/clamped_math.h",
+        "src/base/numerics/clamped_math_impl.h",
+        "src/base/numerics/integral_constant_like.h",
+        "src/base/numerics/math_constants.h",
+        "src/base/numerics/ostream_operators.h",
+        "src/base/numerics/ranges.h",
+        "src/base/numerics/safe_conversions.h",
+        "src/base/numerics/safe_conversions_arm_impl.h",
+        "src/base/numerics/safe_conversions_impl.h",
+        "src/base/numerics/safe_math.h",
+        "src/base/numerics/safe_math_arm_impl.h",
+        "src/base/numerics/safe_math_clang_gcc_impl.h",
+        "src/base/numerics/safe_math_shared_impl.h",
+        "src/base/numerics/wrapping_math.h",
         "src/base/once.cc",
         "src/base/once.h",
         "src/base/overflowing-math.h",
@@ -857,9 +877,6 @@ filegroup(
         "src/base/region-allocator.cc",
         "src/base/region-allocator.h",
         "src/base/ring-buffer.h",
-        "src/base/safe_conversions.h",
-        "src/base/safe_conversions_arm_impl.h",
-        "src/base/safe_conversions_impl.h",
         "src/base/small-map.h",
         "src/base/small-vector.h",
         "src/base/string-format.h",
@@ -1129,6 +1146,7 @@ filegroup(
         "src/objects/call-site-info.tq",
         "src/objects/cell.tq",
         "src/objects/contexts.tq",
+        "src/objects/cpp-heap-external-object.tq",
         "src/objects/data-handler.tq",
         "src/objects/debug-objects.tq",
         "src/objects/descriptor-array.tq",
@@ -1430,6 +1448,8 @@ filegroup(
         "src/codegen/interface-descriptors.cc",
         "src/codegen/interface-descriptors.h",
         "src/codegen/interface-descriptors-inl.h",
+        "src/codegen/jump-table-info.cc",
+        "src/codegen/jump-table-info.h",
         "src/codegen/label.h",
         "src/codegen/linkage-location.h",
         "src/codegen/machine-type.cc",
@@ -1723,7 +1743,6 @@ filegroup(
         "src/heap/heap-write-barrier-inl.h",
         "src/heap/incremental-marking.cc",
         "src/heap/incremental-marking.h",
-        "src/heap/incremental-marking-inl.h",
         "src/heap/incremental-marking-job.cc",
         "src/heap/incremental-marking-job.h",
         "src/heap/index-generator.cc",
@@ -1982,6 +2001,10 @@ filegroup(
         "src/objects/contexts.cc",
         "src/objects/contexts.h",
         "src/objects/contexts-inl.h",
+        "src/objects/cpp-heap-external-object.h",
+        "src/objects/cpp-heap-external-object-inl.h",
+        "src/objects/cpp-heap-object-wrapper.h",
+        "src/objects/cpp-heap-object-wrapper-inl.h",
         "src/objects/data-handler.h",
         "src/objects/data-handler-inl.h",
         "src/objects/debug-objects.cc",
@@ -2604,8 +2627,6 @@ filegroup(
         "src/codegen/x64/assembler-x64.cc",
         "src/codegen/x64/assembler-x64.h",
         "src/codegen/x64/assembler-x64-inl.h",
-        "src/codegen/x64/builtin-jump-table-info-x64.cc",
-        "src/codegen/x64/builtin-jump-table-info-x64.h",
         "src/codegen/x64/constants-x64.h",
         "src/codegen/x64/cpu-x64.cc",
         "src/codegen/x64/fma-instr.h",
@@ -4340,6 +4361,12 @@ cc_library(
     name = "simdutf",
     srcs = ["third_party/simdutf/simdutf.cpp"],
     hdrs = ["third_party/simdutf/simdutf.h"],
+    copts = select({
+        "@v8//bazel/config:is_clang": ["-std=c++20"],
+        "@v8//bazel/config:is_gcc": ["-std=gnu++2a"],
+        "@v8//bazel/config:is_windows": ["/std:c++20"],
+        "//conditions:default": [],
+    }),
 )
 
 v8_library(

deps/v8/BUILD.gn (vendored) | 92
@@ -25,12 +25,6 @@ if (is_ios) {
   import("//build/config/apple/mobile_config.gni")  # For `target_platform`.
 }
 
-# Specifies if the target build is a simulator build. Comparing target cpu
-# with v8 target cpu to not affect simulator builds for making cross-compile
-# snapshots.
-target_is_simulator = (target_cpu != v8_target_cpu && !v8_multi_arch_build) ||
-                      (current_cpu != v8_current_cpu && v8_multi_arch_build)
-
 # For faster Windows builds. See https://crbug.com/v8/8475.
 emit_builtins_as_inline_asm = is_win && is_clang
 
@@ -323,10 +317,11 @@ declare_args() {
 
   # Enable control-flow integrity features, such as pointer authentication for
   # ARM64. Enable it by default for simulator builds and when native code
-  # supports it as well.
-  v8_control_flow_integrity =
-      v8_current_cpu == "arm64" &&
-      (target_is_simulator || arm_control_flow_integrity != "none")
+  # supports it as well. On Mac, control-flow integrity does not work so we
+  # avoid enabling it when using the simulator.
+  v8_control_flow_integrity = v8_current_cpu == "arm64" &&
+                              ((v8_target_is_simulator && target_os != "mac") ||
+                               arm_control_flow_integrity != "none")
 
   # Enable heap reservation of size 4GB. Only possible for 64bit archs.
   cppgc_enable_caged_heap =
@@ -361,6 +356,10 @@ declare_args() {
   # Requires use_rtti = true
   v8_enable_precise_zone_stats = false
 
+  # Set this if V8 monolithic static library is going to be linked into
+  # another shared library.
+  v8_monolithic_for_shared_library = false
+
   # Experimental feature that uses SwissNameDictionary instead of NameDictionary
   # as the backing store for all dictionary mode objects.
   v8_enable_swiss_name_dictionary = false
@@ -739,7 +738,8 @@ if (v8_builtins_profiling_log_file == "default") {
   }
 }
 
-if (v8_enable_webassembly && !target_is_simulator && v8_current_cpu == "x64") {
+if (v8_enable_webassembly && !v8_target_is_simulator &&
+    v8_current_cpu == "x64") {
   v8_enable_wasm_simd256_revec = true
 }
 
@@ -989,7 +989,6 @@ external_v8_defines = [
   "V8_USE_PERFETTO",
   "V8_MAP_PACKING",
   "V8_IS_TSAN",
-  "V8_ENABLE_CONSERVATIVE_STACK_SCANNING",
   "V8_ENABLE_DIRECT_HANDLE",
   "V8_MINORMS_STRING_SHORTCUTTING",
   "V8_HAVE_TARGET_OS",
@@ -1044,9 +1043,6 @@ if (v8_enable_map_packing) {
 if (is_tsan) {
   enabled_external_v8_defines += [ "V8_IS_TSAN" ]
 }
-if (v8_enable_conservative_stack_scanning) {
-  enabled_external_v8_defines += [ "V8_ENABLE_CONSERVATIVE_STACK_SCANNING" ]
-}
 if (v8_enable_direct_handle) {
   enabled_external_v8_defines += [ "V8_ENABLE_DIRECT_HANDLE" ]
 }
@@ -1184,6 +1180,10 @@ config("features") {
     defines += [ "CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS" ]
   }
 
+  if (v8_monolithic && v8_monolithic_for_shared_library) {
+    defines += [ "V8_TLS_USED_IN_LIBRARY" ]
+  }
+
   if (v8_enable_pointer_compression &&
       !v8_enable_pointer_compression_shared_cage) {
     defines += [ "V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES" ]
@@ -1447,6 +1447,9 @@ config("features") {
   if (v8_lower_limits_mode) {
     defines += [ "V8_LOWER_LIMITS_MODE" ]
   }
+  if (v8_target_is_simulator) {
+    defines += [ "USE_SIMULATOR" ]
+  }
 }
 
 config("toolchain") {
@@ -1497,7 +1500,7 @@ config("toolchain") {
   }
 
   # Mips64el simulators.
-  if (target_is_simulator && v8_current_cpu == "mips64el") {
+  if (v8_target_is_simulator && v8_current_cpu == "mips64el") {
     defines += [ "_MIPS_TARGET_SIMULATOR" ]
   }
 
@@ -1530,7 +1533,7 @@ config("toolchain") {
   }
 
   # loong64 simulators.
-  if (target_is_simulator && v8_current_cpu == "loong64") {
+  if (v8_target_is_simulator && v8_current_cpu == "loong64") {
     defines += [ "_LOONG64_TARGET_SIMULATOR" ]
   }
   if (v8_current_cpu == "loong64") {
@@ -1563,10 +1566,10 @@ config("toolchain") {
 
   # Under simulator build, compiler will not provide __riscv_xlen. Define here
   if (v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") {
-    if (target_is_simulator) {
+    if (v8_target_is_simulator) {
       defines += [ "_RISCV_TARGET_SIMULATOR" ]
     }
-    if (riscv_use_rvv || target_is_simulator) {
+    if (riscv_use_rvv || v8_target_is_simulator) {
      defines += [ "CAN_USE_RVV_INSTRUCTIONS" ]
       defines += [ "RVV_VLEN=${riscv_rvv_vlen}" ]
     }
@@ -1823,6 +1826,10 @@ if (v8_postmortem_support) {
     "src/objects/casting-inl.h",
     "src/objects/code.h",
     "src/objects/code-inl.h",
+    "src/objects/cpp-heap-external-object.h",
+    "src/objects/cpp-heap-external-object-inl.h",
+    "src/objects/cpp-heap-object-wrapper.h",
+    "src/objects/cpp-heap-object-wrapper-inl.h",
     "src/objects/data-handler.h",
     "src/objects/data-handler-inl.h",
     "src/objects/deoptimization-data.h",
@@ -2061,6 +2068,7 @@ torque_files = [
   "src/objects/cell.tq",
   "src/objects/bytecode-array.tq",
   "src/objects/contexts.tq",
+  "src/objects/cpp-heap-external-object.tq",
   "src/objects/data-handler.tq",
   "src/objects/debug-objects.tq",
   "src/objects/descriptor-array.tq",
@@ -2546,7 +2554,7 @@ template("run_mksnapshot") {
     # This is needed to distinguish between generating code for the simulator
     # and cross-compiling. The latter may need to run code on the host with the
     # simulator but cannot use simulator-specific instructions.
-    if (target_is_simulator) {
+    if (v8_target_is_simulator) {
      args += [ "--target_is_simulator" ]
     }
 
@@ -2788,7 +2796,6 @@ action("v8_dump_build_config") {
     "code_comments=$v8_code_comments",
     "component_build=$is_component_build",
     "concurrent_marking=$v8_enable_concurrent_marking",
-    "conservative_stack_scanning=$v8_enable_conservative_stack_scanning",
     "current_cpu=\"$current_cpu\"",
     "dcheck_always_on=$v8_dcheck_always_on",
     "debug_code=$v8_enable_debug_code",
@@ -2854,8 +2861,6 @@ generated_file("v8_generate_features_json") {
   contents = {
     v8_deprecation_warnings = v8_deprecation_warnings
     v8_enable_31bit_smis_on_64bit_arch = v8_enable_31bit_smis_on_64bit_arch
-    v8_enable_conservative_stack_scanning =
-        v8_enable_conservative_stack_scanning
     v8_enable_direct_handle = v8_enable_direct_handle
     v8_enable_extensible_ro_snapshot = v8_enable_extensible_ro_snapshot
     v8_enable_gdbjit = v8_enable_gdbjit
@@ -3176,6 +3181,7 @@ v8_header_set("v8_headers") {
     "include/v8-callbacks.h",
     "include/v8-container.h",
     "include/v8-context.h",
+    "include/v8-cpp-heap-external.h",
     "include/v8-cppgc.h",
     "include/v8-data.h",
     "include/v8-date.h",
@@ -3380,6 +3386,7 @@ v8_header_set("v8_internal_headers") {
     "src/codegen/handler-table.h",
     "src/codegen/interface-descriptors-inl.h",
     "src/codegen/interface-descriptors.h",
+    "src/codegen/jump-table-info.h",
     "src/codegen/label.h",
     "src/codegen/linkage-location.h",
     "src/codegen/machine-type.h",
@@ -3773,7 +3780,6 @@ v8_header_set("v8_internal_headers") {
     "src/heap/heap-write-barrier-inl.h",
     "src/heap/heap-write-barrier.h",
     "src/heap/heap.h",
-    "src/heap/incremental-marking-inl.h",
     "src/heap/incremental-marking-job.h",
     "src/heap/incremental-marking.h",
     "src/heap/index-generator.h",
@@ -3937,6 +3943,10 @@ v8_header_set("v8_internal_headers") {
     "src/objects/compressed-slots.h",
     "src/objects/contexts-inl.h",
     "src/objects/contexts.h",
+    "src/objects/cpp-heap-external-object-inl.h",
+    "src/objects/cpp-heap-external-object.h",
+    "src/objects/cpp-heap-object-wrapper-inl.h",
+    "src/objects/cpp-heap-object-wrapper.h",
     "src/objects/data-handler-inl.h",
     "src/objects/data-handler.h",
     "src/objects/debug-objects-inl.h",
@@ -4610,7 +4620,6 @@ v8_header_set("v8_internal_headers") {
      "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h",
       "src/codegen/x64/assembler-x64-inl.h",
       "src/codegen/x64/assembler-x64.h",
-      "src/codegen/x64/builtin-jump-table-info-x64.h",
       "src/codegen/x64/constants-x64.h",
       "src/codegen/x64/fma-instr.h",
       "src/codegen/x64/interface-descriptors-x64-inl.h",
@@ -4711,8 +4720,10 @@ v8_header_set("v8_internal_headers") {
       (current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
     sources += [ "src/trap-handler/handler-inside-posix.h" ]
   }
-  if (current_cpu == "x64" &&
-      (is_linux || is_chromeos || is_mac || is_win)) {
+  if ((current_cpu == "x64" &&
+       (is_linux || is_chromeos || is_mac || is_win)) ||
+      (current_cpu == "arm64" && v8_target_is_simulator &&
+       (is_linux || is_mac))) {
     sources += [ "src/trap-handler/trap-handler-simulator.h" ]
   }
 }
@@ -5401,6 +5412,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/codegen/flush-instruction-cache.cc",
     "src/codegen/handler-table.cc",
     "src/codegen/interface-descriptors.cc",
+    "src/codegen/jump-table-info.cc",
     "src/codegen/machine-type.cc",
     "src/codegen/macro-assembler-base.cc",
     "src/codegen/maglev-safepoint-table.cc",
@@ -5984,7 +5996,6 @@ v8_source_set("v8_base_without_compiler") {
       ### gcmole(x64) ###
      "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc",
       "src/codegen/x64/assembler-x64.cc",
-      "src/codegen/x64/builtin-jump-table-info-x64.cc",
       "src/codegen/x64/cpu-x64.cc",
       "src/codegen/x64/macro-assembler-x64.cc",
       "src/deoptimizer/x64/deoptimizer-x64.cc",
@@ -6065,7 +6076,7 @@ v8_source_set("v8_base_without_compiler") {
         "src/trap-handler/handler-outside-win.cc",
       ]
     }
-    if (current_cpu == "x64" &&
+    if ((current_cpu == "x64" || current_cpu == "arm64") &&
        (is_linux || is_chromeos || is_mac || is_win)) {
       sources += [ "src/trap-handler/handler-outside-simulator.cc" ]
     }
@@ -6529,6 +6540,25 @@ v8_component("v8_libbase") {
     "src/base/numbers/fixed-dtoa.h",
     "src/base/numbers/strtod.cc",
     "src/base/numbers/strtod.h",
+    "src/base/numerics/angle_conversions.h",
+    "src/base/numerics/basic_ops_impl.h",
+    "src/base/numerics/byte_conversions.h",
+    "src/base/numerics/checked_math.h",
+    "src/base/numerics/checked_math_impl.h",
+    "src/base/numerics/clamped_math.h",
+    "src/base/numerics/clamped_math_impl.h",
+    "src/base/numerics/integral_constant_like.h",
+    "src/base/numerics/math_constants.h",
+    "src/base/numerics/ostream_operators.h",
+    "src/base/numerics/ranges.h",
+    "src/base/numerics/safe_conversions.h",
+    "src/base/numerics/safe_conversions_arm_impl.h",
+    "src/base/numerics/safe_conversions_impl.h",
+    "src/base/numerics/safe_math.h",
+    "src/base/numerics/safe_math_arm_impl.h",
+    "src/base/numerics/safe_math_clang_gcc_impl.h",
+    "src/base/numerics/safe_math_shared_impl.h",
+    "src/base/numerics/wrapping_math.h",
     "src/base/once.cc",
     "src/base/once.h",
     "src/base/overflowing-math.h",
@@ -6554,9 +6584,6 @@ v8_component("v8_libbase") {
     "src/base/region-allocator.cc",
     "src/base/region-allocator.h",
     "src/base/ring-buffer.h",
-    "src/base/safe_conversions.h",
-    "src/base/safe_conversions_arm_impl.h",
-    "src/base/safe_conversions_impl.h",
     "src/base/sanitizer/asan.h",
     "src/base/sanitizer/lsan-page-allocator.cc",
     "src/base/sanitizer/lsan-page-allocator.h",
@@ -7010,6 +7037,7 @@ v8_header_set("cppgc_headers") {
     "include/cppgc/process-heap-statistics.h",
     "include/cppgc/sentinel-pointer.h",
     "include/cppgc/source-location.h",
+    "include/cppgc/tagged-member.h",
 
     # TODO(v8:11952): Remove the testing header here once depending on both,
     # //v8:v8 and //v8:v8_for_testing does not result in ODR violations.

deps/v8/DEPS (vendored) | 248
@@ -74,24 +74,24 @@ vars = {
   'build_with_chromium': False,
 
   # GN CIPD package version.
-  'gn_version': 'git_revision:6e8e0d6d4a151ab2ed9b4a35366e630c55888444',
+  'gn_version': 'git_revision:90478db6b59b9bebf7ca4cf912d860cf868e724c',
 
   # ninja CIPD package version
   # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
   'ninja_version': 'version:3@1.12.1.chromium.4',
 
   # siso CIPD package version
-  'siso_version': 'git_revision:68bdc49e4e23aef066fc652cbdb1b4973aab1a31',
+  'siso_version': 'git_revision:70e1167e0e6dad10c8388cace8fd9d9376c43316',
 
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling Fuchsia sdk
   # and whatever else without interference from each other.
-  'fuchsia_version': 'version:27.20250326.5.1',
+  'fuchsia_version': 'version:27.20250424.2.1',
 
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling partition_alloc_version
   # and whatever else without interference from each other.
-  'partition_alloc_version': 'ab56923a27b2793f21994589b0c39bc3324ff49f',
+  'partition_alloc_version': '862506deb382f3f8a8fa9689c8d5136a48e9b778',
 
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -129,9 +129,9 @@ vars = {
 
 deps = {
   'build':
-    Var('chromium_url') + '/chromium/src/build.git' + '@' + '451ef881d77fff0b7a8bbfa61934f5e4a35b4c96',
+    Var('chromium_url') + '/chromium/src/build.git' + '@' + '88030b320338e0706b6b93336c4b35e6bbaf467e',
   'buildtools':
-    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '6f359296daa889aa726f3d05046b9f37be241169',
+    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '0f32cb9025766951122d4ed19aba87a94ded3f43',
   'buildtools/linux64': {
     'packages': [
       {
@@ -177,7 +177,7 @@ deps = {
   'test/mozilla/data':
     Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
   'test/test262/data':
-    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ce7e72d2107f99d165f4259571f10aa75753d997',
+    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'c5257e6119f83f856602f2ccbc46547a8fef0960',
   'third_party/android_platform': {
     'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '98aee46efb1cc4e09fa0e3ecaa6b19dc258645fa',
     'condition': 'checkout_android',
@@ -224,14 +224,14 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_toolchain/android_toolchain',
-        'version': 'Idl-vYnWGnM8K3XJhM3h6zjYVDXlnljVz3FE00V9IM8C',
+        'version': 'KXOia11cm9lVdUdPlbGLu8sCz6Y4ey_HV2s8_8qeqhgC',
       },
     ],
     'condition': 'checkout_android',
     'dep_type': 'cipd',
   },
   'third_party/catapult': {
-    'url': Var('chromium_url') + '/catapult.git' + '@' + '5bda0fdab9d93ec9963e2cd858c7b49ad7fec7d4',
+    'url': Var('chromium_url') + '/catapult.git' + '@' + '000f47cfa393d7f9557025a252862e2a61a60d44',
     'condition': 'checkout_android',
   },
   'third_party/clang-format/script':
@@ -245,17 +245,17 @@ deps = {
     'condition': 'checkout_android',
   },
   'third_party/depot_tools':
-    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'f40ddcd8d51626fb7be3ab3c418b3f3be801623f',
+    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '1fcc527019d786502b02f71b8b764ee674a40953',
   'third_party/fp16/src':
     Var('chromium_url') + '/external/github.com/Maratyszcza/FP16.git' + '@' + '0a92994d729ff76a58f692d3028ca1b64b145d91',
   'third_party/fast_float/src':
     Var('chromium_url') + '/external/github.com/fastfloat/fast_float.git' + '@' + 'cb1d42aaa1e14b09e1452cfdef373d051b8c02a4',
   'third_party/fuchsia-gn-sdk': {
-    'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + '3845a68eb4421e64fbdf9f4805b5ac6d73742e08',
+    'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + 'e678aca1bad7a1c9a38620b2e328281bc68f6357',
     'condition': 'checkout_fuchsia',
   },
   'third_party/simdutf':
-    Var('chromium_url') + '/chromium/src/third_party/simdutf' + '@' + '40d1fa26cd5ca221605c974e22c001ca2fb12fde',
+    Var('chromium_url') + '/chromium/src/third_party/simdutf' + '@' + '62d1cfb62967c0076c997a10d54d50f9571fb8e9',
   # Exists for rolling the Fuchsia SDK. Check out of the SDK should always
   # rely on the hook running |update_sdk.py| script below.
   'third_party/fuchsia-sdk/sdk': {
@@ -269,21 +269,21 @@ deps = {
     'dep_type': 'cipd',
   },
   'third_party/google_benchmark_chrome': {
-    'url': Var('chromium_url') + '/chromium/src/third_party/google_benchmark.git' + '@' + '917e1208b42fdce63511e401067677ffee3a5c7d',
+    'url': Var('chromium_url') + '/chromium/src/third_party/google_benchmark.git' + '@' + '29e4389fdc1eeb9137eb464b7f34e07c01c2731e',
   },
   'third_party/google_benchmark_chrome/src': {
     'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '761305ec3b33abf30e08d50eb829e19a802581cc',
   },
   'third_party/fuzztest':
-    Var('chromium_url') + '/chromium/src/third_party/fuzztest.git' + '@' + 'df29ed1355d06c486e17fc421767ff01af050ca4',
+    Var('chromium_url') + '/chromium/src/third_party/fuzztest.git' + '@' + '4a7e9c055e63f4d67e04229ab491eaefe409addf',
   'third_party/fuzztest/src':
-    Var('chromium_url') + '/external/github.com/google/fuzztest.git' + '@' + '3c7bc855a4938c5d0d1d07303aa0697c88d33e6c',
+    Var('chromium_url') + '/external/github.com/google/fuzztest.git' + '@' + 'b10387fdbbca18192f85eaa5323a59f44bf9c468',
   'third_party/googletest/src':
-    Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '52204f78f94d7512df1f0f3bea1d47437a2c3a58',
+    Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'cd430b47a54841ec45d64d2377d7cabaf0eba610',
   'third_party/highway/src':
     Var('chromium_url') + '/external/github.com/google/highway.git' + '@' + '00fe003dac355b979f36157f9407c7c46448958e',
   'third_party/icu':
-    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'c9fb4b3a6fb54aa8c20a03bbcaa0a4a985ffd34b',
+    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '4c8cc4b365a505ce35be1e0bd488476c5f79805d',
   'third_party/instrumented_libs': {
     'url': Var('chromium_url') + '/chromium/third_party/instrumented_libraries.git' + '@' + '69015643b3f68dbd438c010439c59adc52cac808',
     'condition': 'checkout_instrumented_libraries',
@@ -299,155 +299,155 @@ deps = {
   'third_party/jsoncpp/source':
     Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448',
   'third_party/libc++/src':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '449310fe2e37834a7e62972d2a690cade2ef596b',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '917609c669e43edc850eeb192a342434a54e1dfd',
   'third_party/libc++abi/src':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '94c5d7a8edc09f0680aee57548c0b5d400c2840d',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'f2a7f2987f9dcdf8b04c2d8cd4dcb186641a7c3e',
   'third_party/libunwind/src':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'e2e6f2a67e9420e770b014ce9bba476fa2ab9874',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '81e2cb40a70de2b6978e6d8658891ded9a77f7e3',
   'third_party/llvm-libc/src':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libc.git' + '@' + '188329a7f2118a957efbb3e6219c255e7dba997c',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libc.git' + '@' + '912274164f0877ca917c06e8484ad3be1784833a',
   'third_party/llvm-build/Release+Asserts': {
     'dep_type': 'gcs',
     'bucket': 'chromium-browser-clang',
     'objects': [
       {
-        'object_name': 'Linux_x64/clang-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '790fcc5b04e96882e8227ba7994161ab945c0e096057fc165a0f71e32a7cb061',
-        'size_bytes': 54517328,
-        'generation': 1742541959624765,
+        'object_name': 'Linux_x64/clang-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '2cccd3a5b04461f17a2e78d2f8bd18b448443a9dd4d6dfac50e8e84b4d5176f1',
+        'size_bytes': 54914604,
+        'generation': 1745271343199398,
         'condition': 'host_os == "linux"',
      },
       {
-        'object_name': 'Linux_x64/clang-tidy-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '6e325d9f62e831bfbae23413a75535a851fd2cdf2f7cf06a5b724e86f72b2df0',
-        'size_bytes': 13206280,
-        'generation': 1742541959572183,
+        'object_name': 'Linux_x64/clang-tidy-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': 'f0e7dae567266055c9cfa2fba5b3dafa311dc86955f5a3f7a4047ce3096e7b27',
+        'size_bytes': 13559360,
+        'generation': 1745271343282399,
         'condition': 'host_os == "linux" and checkout_clang_tidy',
       },
       {
-        'object_name': 'Linux_x64/clangd-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '95d4146cb0b892db68c55bbb523b30301e538d0f4dc71517612fdee62664b81a',
-        'size_bytes': 13566616,
-        'generation': 1742541959718102,
+        'object_name': 'Linux_x64/clangd-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': 'd87ec8e9cd959cf5d12e0de2970f4a88a67f9884467dac5285813d02bbe50bcb',
+        'size_bytes': 13767836,
+        'generation': 1745271343386108,
         'condition': 'host_os == "linux" and checkout_clangd',
       },
      {
-        'object_name': 'Linux_x64/llvm-code-coverage-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': 'a10c2831ad30275a72f2955e65e62f6af78542f380661443dab4d20c65f203a4',
-        'size_bytes': 2299292,
-        'generation': 1742541960157221,
+        'object_name': 'Linux_x64/llvm-code-coverage-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '08f9cdbdc1e3f78dfb11aa9815727e8af0cf8f2b9c9a0e3749ceb4d3584fc900',
+        'size_bytes': 2293720,
+        'generation': 1745271343569971,
         'condition': 'host_os == "linux" and checkout_clang_coverage_tools',
       },
      {
-        'object_name': 'Linux_x64/llvmobjdump-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '56a5bb654a4550d332f86a23e38a0495c6187092868af817ecb999bd9de9c8a0',
-        'size_bytes': 5429676,
-        'generation': 1742541959869492,
+        'object_name': 'Linux_x64/llvmobjdump-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '4b9c20478c015a03a44842d0bc24a9bd01a87890c76c4496577843ea31a21ed1',
+        'size_bytes': 5702536,
+        'generation': 1745271343407073,
        'condition': '(checkout_linux or checkout_mac or checkout_android and host_os != "mac")',
       },
       {
-        'object_name': 'Mac/clang-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '330f8c4cdde3095ac54aff772dbf9bbd96a753df58525546931cbd9bb615f793',
-        'size_bytes': 51652432,
-        'generation': 1742541961981004,
+        'object_name': 'Mac/clang-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '159cc811ee2882098086a426e83cb6744ff59d422d005a54630bc519e782d154',
+        'size_bytes': 51986012,
+        'generation': 1745271345031799,
         'condition': 'host_os == "mac" and host_cpu == "x64"',
       },
       {
-        'object_name': 'Mac/clang-mac-runtime-library-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '013f468c65fae6f736cd380791fef892a0fc9fc107516fcae34d1f998eeb081f',
-        'size_bytes': 978248,
-        'generation': 1742541983231339,
+        'object_name': 'Mac/clang-mac-runtime-library-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': 'ef380bc751dc8b137e294ac1aca295f3e49eb57b938ab011c38c70271d8582fc',
+        'size_bytes': 988872,
+        'generation': 1745271352425938,
         'condition': 'checkout_mac and not host_os == "mac"',
       },
      {
-        'object_name': 'Mac/clang-tidy-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': 'f29391d18e9fa40774e0386224235890933a8b9eddb9b7eb93d2a4a0867241a1',
-        'size_bytes': 13468608,
-        'generation': 1742541962672221,
+        'object_name': 'Mac/clang-tidy-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '75907ac8d2ab310fd7272715c5d98cd4382dbd0b867872aa9216cede48c274d5',
+        'size_bytes': 13609872,
+        'generation': 1745271345094426,
         'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clang_tidy',
       },
      {
-        'object_name': 'Mac/clangd-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '06c48661d55a7b465d8fb02be56f8550c34d3962a9d0f8ce19b17bdd37127691',
-        'size_bytes': 15012228,
-        'generation': 1742541962463652,
+        'object_name': 'Mac/clangd-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': 'e2bcab0b3961fdc7a63286cf7a98397026ff1b5143d34c8a50844b26a7b023c6',
+        'size_bytes': 14998604,
+        'generation': 1745271345196743,
         'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clangd',
       },
      {
-        'object_name': 'Mac/llvm-code-coverage-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '12f3accae43fa9591bbf28a8e0785b99ff75ed2c84f89518bd5ef5119a2525f0',
-        'size_bytes': 2255296,
-        'generation': 1742541963013464,
+        'object_name': 'Mac/llvm-code-coverage-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '6e4c8ed691948981d799f4af747288cdd5e90ae873dc36ada66726ad3e6caef1',
+        'size_bytes': 2262400,
+        'generation': 1745271345385127,
         'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clang_coverage_tools',
       },
      {
-        'object_name': 'Mac_arm64/clang-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '49967dab3fa4c5f1ff1fe235059be71727c190ff4ccc80f08d39e1bba4dfed58',
-        'size_bytes': 43810156,
-        'generation': 1742541984650930,
+        'object_name': 'Mac_arm64/clang-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '3d437a643cc5838963254a39ab0528f49f2b65cd4dba2c80e628ad88eb419536',
+        'size_bytes': 43999512,
+        'generation': 1745271353863965,
         'condition': 'host_os == "mac" and host_cpu == "arm64"',
       },
      {
-        'object_name': 'Mac_arm64/clang-tidy-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '4f5326253ed3736ec262e8e69d93befadf9473419865240673a2ec883c3614b6',
-        'size_bytes': 11607236,
-        'generation': 1742541984970894,
+        'object_name': 'Mac_arm64/clang-tidy-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '0b4150c9e699e1e904495807aff48d2e5396527bf775d6597818dd4f73a9c38f',
+        'size_bytes': 11776260,
+        'generation': 1745271353927359,
         'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clang_tidy',
       },
      {
-        'object_name': 'Mac_arm64/clangd-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': 'b3da417d27ba5afe6c9f612d5808c416a17ed1b28f2acd745e0cd2962a5eeac1',
-        'size_bytes': 12000852,
-        'generation': 1742541985144552,
+        'object_name': 'Mac_arm64/clangd-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '9fcd151cfffa01e6befe3760b9bc91d645135c79449dc378af4cb2fe0187150c',
+        'size_bytes': 12041956,
+        'generation': 1745271354010497,
         'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clangd',
       },
      {
-        'object_name': 'Mac_arm64/llvm-code-coverage-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '78139b473cdf4d43da880b573661a5d28d94a8bcb4dea41607d324301745f28c',
-        'size_bytes': 1976480,
-        'generation': 1742541985608174,
+        'object_name': 'Mac_arm64/llvm-code-coverage-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '1e62752ef5fd9d425699ed44098d5a0eec3be3f827990470aa9f9199d34a2fb8',
+        'size_bytes': 1975116,
+        'generation': 1745271354276821,
         'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clang_coverage_tools',
       },
      {
-        'object_name': 'Win/clang-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': 'b46fb4a5cbf9c52d0b345fc2d77ad4ac15dfbb45aa494fb49261786c679af44a',
-        'size_bytes': 46813332,
-        'generation': 1742542010902044,
+        'object_name': 'Win/clang-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': 'd53230dbb7db57ddcda5a8377b5dd8388deee9ff2766617d54c6159c51e806be',
+        'size_bytes': 47036964,
+        'generation': 1745271363166454,
         'condition': 'host_os == "win"',
       },
      {
-        'object_name': 'Win/clang-tidy-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '50d527e811ae8543effa2bb5734a1d424d9c497fbf1d96c76d44b6b5ee7f240b',
-        'size_bytes': 13233236,
-        'generation': 1742542011983982,
+        'object_name': 'Win/clang-tidy-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': 'c6022f5923be5afc1685723a2383fcd8f9334cc6ee097ce3c71963de6ded0764',
+        'size_bytes': 13415856,
+        'generation': 1745271363272778,
         'condition': 'host_os == "win" and checkout_clang_tidy',
       },
      {
-        'object_name': 'Win/clang-win-runtime-library-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': 'bd2ad1930c0ba7d00364dd344886fd57e16aa070ff1b6a1aade72b58d28e8275',
-        'size_bytes': 2474048,
-        'generation': 1742542035740788,
+        'object_name': 'Win/clang-win-runtime-library-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '555a34dd110a5fe3f7578745e9f0074cc341e550bed4ec5888accffb0200a7bb',
+        'size_bytes': 2483656,
+        'generation': 1745271370423782,
         'condition': 'checkout_win and not host_os == "win"',
       },
      {
-        'object_name': 'Win/clangd-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '06563cfcb24d7196717551dfcda13ed0b97fb9e795dba06007c55ae563e824b0',
-        'size_bytes': 13759668,
-        'generation': 1742542011820938,
+        'object_name': 'Win/clangd-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '66aafde760608c4c0de94a3947a179db8c8f93c8e474e3081b4401287abe4ee4',
+        'size_bytes': 13838692,
+        'generation': 1745271363368641,
         'condition': 'host_os == "win" and checkout_clangd',
       },
      {
-        'object_name': 'Win/llvm-code-coverage-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': '7c2fe8784910dc05445cd7f16742e0b2a09a45fb2ba96ddd5f1d8c895ac65d44',
-        'size_bytes': 2365956,
-        'generation': 1742542013491786,
+        'object_name': 'Win/llvm-code-coverage-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': '6944dc39b33dca3bb8f219ffb221e3f345fe56a5fa0447c60ea6a2894ae72687',
+        'size_bytes': 2373032,
+        'generation': 1745271363562596,
         'condition': 'host_os == "win" and checkout_clang_coverage_tools',
       },
      {
-        'object_name': 'Win/llvmobjdump-llvmorg-21-init-5118-g52cd27e6-5.tar.xz',
-        'sha256sum': 'a07be25cb4d565422b10001ca3595111d40bd42c47b37b41e2fff5708fe82302',
-        'size_bytes': 5527784,
-        'generation': 1742542012678160,
+        'object_name': 'Win/llvmobjdump-llvmorg-21-init-9266-g09006611-1.tar.xz',
+        'sha256sum': 'e8b3e9f7cd7512edc7c05a12e818386cdb8d43bea9affbf0bf4db83a553092a5',
+        'size_bytes': 5684140,
+        'generation': 1745271363450942,
         'condition': 'checkout_linux or checkout_mac or checkout_android and host_os == "win"',
       },
     ],
@@ -473,7 +473,7 @@ deps = {
   'third_party/perfetto':
     Var('android_url') + '/platform/external/perfetto.git' + '@' + '40b529923598b739b2892a536a7692eedbed5685',
   'third_party/protobuf':
-    Var('chromium_url') + '/chromium/src/third_party/protobuf.git' + '@' + 'b714f7890b8b6ad3ff3471d3148b28c2c7bbff90',
+    Var('chromium_url') + '/chromium/src/third_party/protobuf.git' + '@' + '56b98941c7a305f54fc6c1c0a082fcb232f92954',
   'third_party/re2/src':
     Var('chromium_url') + '/external/github.com/google/re2.git' + '@' + 'c84a140c93352cdabbfb547c531be34515b12228',
   'third_party/requests': {
@@ -481,39 +481,39 @@ deps = {
     'condition': 'checkout_android',
   },
   'tools/rust':
-    Var('chromium_url') + '/chromium/src/tools/rust' + '@' + '7cdd3d9540f3ab428dbcc9ab83c2896c100bcdc5',
+    Var('chromium_url') + '/chromium/src/tools/rust' + '@' + 'fa679ed68ee49fb99a7e924f57e4d2b6444103d6',
   'third_party/rust':
-    Var('chromium_url') + '/chromium/src/third_party/rust' + '@' + 'ed577320339cd175171e9c96d3d73452ddbcbd98',
+    Var('chromium_url') + '/chromium/src/third_party/rust' + '@' + '926ec544992cad0ac638f3594fe6195ed493ebff',
   'third_party/rust-toolchain': {
     'dep_type': 'gcs',
     'bucket': 'chromium-browser-clang',
     'objects': [
      {
-        'object_name': 'Linux_x64/rust-toolchain-f7b43542838f0a4a6cfdb17fbeadf45002042a77-1-llvmorg-21-init-5118-g52cd27e6.tar.xz',
-        'sha256sum': '213ffcc751ba5f5a4e15fc0dbcbdb94aa7dbc4b6cddd3605121cd26ff8a8b359',
-        'size_bytes': 118223072,
-        'generation': 1741985831167267,
+        'object_name': 'Linux_x64/rust-toolchain-c8f94230282a8e8c1148f3e657f0199aad909228-1-llvmorg-21-init-9266-g09006611.tar.xz',
+        'sha256sum': '378c432f7739bb5da11aad7b3a2687f8252565eae5f0dcfc55c39a15382c519c',
+        'size_bytes': 118598336,
+        'generation': 1745271335898717,
         'condition': 'host_os == "linux"',
       },
      {
-        'object_name': 'Mac/rust-toolchain-f7b43542838f0a4a6cfdb17fbeadf45002042a77-1-llvmorg-21-init-5118-g52cd27e6.tar.xz',
-        'sha256sum': 'f5ad2fe26336a87713ffcad9e06ae4c1ecb4773ae496a33450a7091c5eec560c',
-        'size_bytes': 111168208,
-        'generation': 1741985832885972,
+        'object_name': 'Mac/rust-toolchain-c8f94230282a8e8c1148f3e657f0199aad909228-1-llvmorg-21-init-9266-g09006611.tar.xz',
+        'sha256sum': 'bf05c8b5e90d6904de02dca9b3e4cb5e45a1a56207e7af1fbb3a10707704a26a',
+        'size_bytes': 111932536,
+        'generation': 1745271337336068,
         'condition': 'host_os == "mac" and host_cpu == "x64"',
       },
      {
-        'object_name': 'Mac_arm64/rust-toolchain-f7b43542838f0a4a6cfdb17fbeadf45002042a77-1-llvmorg-21-init-5118-g52cd27e6.tar.xz',
-        'sha256sum': 'fac3586c08239bbb8fd192a7ba5deaa9ae62f6fde2c1d665953f87176467a156',
-        'size_bytes': 100534232,
-        'generation': 1741985834191792,
+        'object_name': 'Mac_arm64/rust-toolchain-c8f94230282a8e8c1148f3e657f0199aad909228-1-llvmorg-21-init-9266-g09006611.tar.xz',
+        'sha256sum': '1aec99f479ff28cefe44ed739844833e016a1da255cf3c17d79e59a273246615',
+        'size_bytes': 101605468,
+        'generation': 1745271339727037,
         'condition': 'host_os == "mac" and host_cpu == "arm64"',
       },
      {
-        'object_name': 'Win/rust-toolchain-f7b43542838f0a4a6cfdb17fbeadf45002042a77-1-llvmorg-21-init-5118-g52cd27e6.tar.xz',
-        'sha256sum': '7b41e74c9b45ca97ca65279c605e6af878de5682fe574d1f1860d9da9b3a5909',
-        'size_bytes': 180896336,
-        'generation': 1741985835535129,
+        'object_name': 'Win/rust-toolchain-c8f94230282a8e8c1148f3e657f0199aad909228-1-llvmorg-21-init-9266-g09006611.tar.xz',
+        'sha256sum': 'b291520613a3ebc415e4576a7fa31d840a5ebf4ab9be6e9dc5d90062dc001c1e',
+        'size_bytes': 193280372,
+        'generation': 1745271341223097,
        'condition': 'host_os == "win"',
       },
     ],
@@ -529,13 +529,13 @@ deps = {
     'condition': 'not build_with_chromium and host_cpu != "s390" and host_os != "zos" and host_cpu != "ppc"',
   },
   'third_party/zlib':
-    Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '788cb3c270e8700b425c7bdca1f9ce6b0c1400a9',
+    Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '1e85c01b15363d11fab81c46fe2b5c2179113f70',
   'tools/clang':
-    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '0078c27c43cae91e96bb28d8a4407045966e0542',
+    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '6c4f037a983abf14a4c8bf00e44db73cdf330a97',
   'tools/protoc_wrapper':
-    Var('chromium_url') + '/chromium/src/tools/protoc_wrapper.git' + '@' + 'dbcbea90c20ae1ece442d8ef64e61c7b10e2b013',
+    Var('chromium_url') + '/chromium/src/tools/protoc_wrapper.git' + '@' + '8ad6d21544b14c7f753852328d71861b363cc512',
   'third_party/abseil-cpp': {
-    'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + '3fbb10e80d80e3430224b75add53c47c7a711612',
+    'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + '91f1a3775e4c509c3eadd4870fc9929c0021e6e3',
     'condition': 'not build_with_chromium',
   },
   'third_party/zoslib': {

deps/v8/PRESUBMIT.py (vendored) | 3
@@ -136,6 +136,7 @@ def _V8PresubmitChecks(input_api, output_api):
       input_api, output_api, bot_allowlist=[
           'v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com',
           'v8-ci-test262-import-export@chops-service-accounts.iam.gserviceaccount.com',
+          'chrome-cherry-picker@chops-service-accounts.iam.gserviceaccount.com',
       ]))
   return results
 
@@ -385,7 +386,7 @@ def _CheckInlineHeadersIncludeNonInlineHeadersFirst(input_api, output_api):
   for f in input_api.AffectedSourceFiles(FilterFile):
     if not os.path.isfile(to_non_inl(f.AbsoluteLocalPath())):
       continue
-    non_inl_header = to_non_inl(f.LocalPath())
+    non_inl_header = to_non_inl(f.LocalPath()).replace(os.sep, '/')
    first_include = None
     for line in f.NewContents():
       if line.startswith('#include '):

deps/v8/build_overrides/protobuf.gni (vendored, new empty file) | 0

deps/v8/gni/snapshot_toolchain.gni (vendored) | 5
@@ -104,9 +104,10 @@ if (v8_snapshot_toolchain == "") {
       # cross compile Windows arm64 with host toolchain.
       v8_snapshot_toolchain = host_toolchain
     }
-  } else if (host_cpu == "arm64" && current_cpu == "arm64" &&
+  } else if (host_cpu == "arm64" &&
+             (current_cpu == "arm64" || current_cpu == "arm64e") &&
              host_os == "mac") {
-    # cross compile iOS arm64 with host_toolchain
+    # cross compile iOS arm64/arm64e with host_toolchain
     v8_snapshot_toolchain = host_toolchain
   }
 }

deps/v8/gni/v8.gni (vendored) | 23
@@ -83,9 +83,9 @@ declare_args() {
   # (is_ios is based on host_os).
   if (target_os == "ios") {
     if (target_platform == "iphoneos") {
-      # iOS executable code pages is in 17.4 SDK.
+      # iOS executable code pages is in 18.4 SDK.
       # TODO(dtapuska): Change this to an assert.
-      v8_enable_lite_mode = ios_deployment_target != "17.4"
+      v8_enable_lite_mode = ios_deployment_target != "18.4"
     } else if (target_platform == "tvos") {
       # tvOS runs in single process mode and is not allowed to use JIT.
       # TODO(crbug.com/394710095): Enable the v8 lite mode to run v8 with the
@@ -123,11 +123,8 @@ declare_args() {
       !(defined(build_with_node) && build_with_node) &&
       !(is_win && is_component_build) && is_clang
 
-  # Scan the call stack conservatively during garbage collection.
-  v8_enable_conservative_stack_scanning = false
-
   # Use direct pointers in handles (v8::internal::Handle and v8::Local).
-  v8_enable_direct_handle = ""
+  v8_enable_direct_handle = false
 
   # Check for off-stack allocated local handles.
   v8_enable_local_off_stack_check = false
@@ -193,6 +190,10 @@ declare_args() {
 
   # Sets -DV8_ENABLE_ETW_STACK_WALKING. Enables ETW Stack Walking
   v8_enable_etw_stack_walking = is_win
+
+  # Specifies if the target build is a simulator build. By default it is set to
+  # true if the host and target do not match and we are not cross-compiling.
+  v8_target_is_simulator = ""
 }
 
 if (v8_use_external_startup_data == "") {
@@ -250,10 +251,12 @@ if (v8_enable_turbofan == "") {
 assert(v8_enable_turbofan || !v8_enable_webassembly,
        "Webassembly is not available when Turbofan is disabled.")
 
-# Direct internal handles and direct locals are enabled by default if
-# conservative stack scanning is enabled.
-if (v8_enable_direct_handle == "") {
-  v8_enable_direct_handle = v8_enable_conservative_stack_scanning
+if (v8_target_is_simulator == "") {
+  # We compare target cpu with v8 target cpu to not affect simulator builds for
+  # making cross-compile snapshots.
+  v8_target_is_simulator =
+      (target_cpu != v8_target_cpu && !v8_multi_arch_build) ||
+      (current_cpu != v8_current_cpu && v8_multi_arch_build)
 }
 
 # Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute

deps/v8/include/DEPS (vendored) | 1
@@ -7,6 +7,7 @@ include_rules = [
   "+cppgc/heap-statistics.h",
   "+cppgc/internal/conditional-stack-allocated.h",
   "+cppgc/internal/write-barrier.h",
+  "+cppgc/type-traits.h",
   "+cppgc/visitor.h",
   "+perfetto",
 ]

deps/v8/include/cppgc/tagged-member.h (vendored, new file) | 111
@@ -0,0 +1,111 @@
+// Copyright 2025 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_TAGGED_MEMBER_H_
+#define INCLUDE_CPPGC_TAGGED_MEMBER_H_
+
+#include <atomic>
+#include <cstddef>
+#include <type_traits>
+
+#include "cppgc/internal/api-constants.h"
+#include "cppgc/macros.h"
+#include "cppgc/member.h"
+#include "cppgc/visitor.h"
+
+namespace cppgc::subtle {
+
+// The class allows to store a Member along with a single bit tag. It uses
+// distinct tag types, Tag1 and Tag2, to represent the two states of the tag.
+// The tag is stored in the least significant bit of the pointer.
+//
+// Example usage:
+//   struct ParentTag {};
+//   struct ShadowHostTag {};
+//
+//   /* Constructs a member with the pointer to parent tag: */
+//   TaggedUncompressedMember<Node, ParentTag, ShadowHostTag>
+//       m(ParentTag{}, parent);
+template <typename Pointee, typename Tag1, typename Tag2>
+struct TaggedUncompressedMember final {
+  CPPGC_DISALLOW_NEW();
+  static constexpr uintptr_t kTagBit = 0b1;
+  static_assert(kTagBit < internal::api_constants::kAllocationGranularity,
+                "The tag must live in the alignment bits of the pointer.");
+
+ public:
+  TaggedUncompressedMember(Tag1, Pointee* ptr) : ptr_(ptr) {}
+  TaggedUncompressedMember(Tag2, Pointee* ptr)
+      : ptr_(reinterpret_cast<Pointee*>(reinterpret_cast<uintptr_t>(ptr) |
+                                        kTagBit)) {}
+
+  template <typename Tag>
+  Pointee* GetAs() const {
+    auto* raw = ptr_.Get();
+    if constexpr (std::same_as<Tag, Tag1>) {
+      CPPGC_DCHECK(Is<Tag1>());
+      return raw;
+    } else {
+      static_assert(std::same_as<Tag, Tag2>);
+      CPPGC_DCHECK(Is<Tag2>());
+      return GetUntagged();
+    }
+  }
+
+  template <typename Tag>
+  Pointee* TryGetAs() const {
+    auto* raw = ptr_.Get();
+    if constexpr (std::same_as<Tag, Tag1>) {
+      return (reinterpret_cast<uintptr_t>(raw) & kTagBit) ? nullptr : raw;
+    } else {
+      static_assert(std::same_as<Tag, Tag2>);
+      return (reinterpret_cast<uintptr_t>(raw) & kTagBit)
+                 ? reinterpret_cast<Pointee*>(reinterpret_cast<uintptr_t>(raw) &
+                                              ~kTagBit)
+                 : nullptr;
+    }
+  }
+
+  Pointee* GetUntagged() const {
+    return reinterpret_cast<Pointee*>(reinterpret_cast<uintptr_t>(ptr_.Get()) &
+                                      ~kTagBit);
+  }
+
+  template <typename Tag>
+  void SetAs(Pointee* pointee) {
+    if constexpr (std::same_as<Tag, Tag1>) {
+      ptr_ = pointee;
+    } else {
+      static_assert(std::same_as<Tag, Tag2>);
+      ptr_ = reinterpret_cast<Pointee*>(reinterpret_cast<uintptr_t>(pointee) |
+                                        kTagBit);
+    }
+  }
+
+  template <typename Tag>
+  bool Is() const {
+    const bool tag_set = reinterpret_cast<uintptr_t>(ptr_.Get()) & kTagBit;
+    if constexpr (std::same_as<Tag, Tag1>) {
+      return !tag_set;
+    } else {
+      static_assert(std::same_as<Tag, Tag2>);
+      return tag_set;
+    }
+  }
+
+  void Trace(Visitor* v) const {
+    // Construct an untagged pointer and pass it to Visitor::Trace(). The plugin
+    // would warn that ptr_ is untraced, which is why CPPGC_PLUGIN_IGNORE is
+    // used.
+    UncompressedMember<Pointee> temp(GetUntagged());
+    v->Trace(temp);
+  }
+
+ private:
+  CPPGC_PLUGIN_IGNORE("See Trace()") UncompressedMember<Pointee> ptr_;
+};
+
+}  // namespace cppgc::subtle
+
+#endif  // INCLUDE_CPPGC_TAGGED_MEMBER_H_
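
Editor's note: the header's own example comment stops at construction. Below is a minimal sketch of how embedder code might use the new tagged member end to end; the `Node` class and the idea of multiplexing a parent/shadow-host slot are hypothetical illustrations (loosely modeled on the ParentTag/ShadowHostTag example above), not part of this commit.

// Hypothetical embedder code, not part of the diff.
#include "cppgc/garbage-collected.h"
#include "cppgc/tagged-member.h"

struct ParentTag {};
struct ShadowHostTag {};

class Node final : public cppgc::GarbageCollected<Node> {
 public:
  explicit Node(Node* parent) : parent_or_shadow_host_(ParentTag{}, parent) {}

  Node* Parent() const {
    // Returns nullptr if the slot currently holds a shadow host instead.
    return parent_or_shadow_host_.TryGetAs<ParentTag>();
  }

  void SetShadowHost(Node* host) {
    parent_or_shadow_host_.SetAs<ShadowHostTag>(host);
  }

  void Trace(cppgc::Visitor* visitor) const {
    // Forwards to the member's Trace(), which strips the tag bit before
    // handing the pointer to the GC visitor.
    parent_or_shadow_host_.Trace(visitor);
  }

 private:
  cppgc::subtle::TaggedUncompressedMember<Node, ParentTag, ShadowHostTag>
      parent_or_shadow_host_;
};

Because the tag lives in the pointer's alignment bits, the tagged member costs no extra space over a plain UncompressedMember.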

deps/v8/include/js_protocol.pdl (vendored) | 4
@@ -649,7 +649,7 @@ domain Debugger
       Runtime.ExecutionContextId executionContextId
       # Content hash of the script, SHA-256.
       string hash
-      # For Wasm modules, the content of the `build_id` custom section.
+      # For Wasm modules, the content of the `build_id` custom section. For JavaScript the `debugId` magic comment.
       string buildId
       # Embedder-specific auxiliary data likely matching {isDefault: boolean, type: 'default'|'isolated'|'worker', frameId: string}
       optional object executionContextAuxData
@@ -690,7 +690,7 @@ domain Debugger
       Runtime.ExecutionContextId executionContextId
       # Content hash of the script, SHA-256.
       string hash
-      # For Wasm modules, the content of the `build_id` custom section.
+      # For Wasm modules, the content of the `build_id` custom section. For JavaScript the `debugId` magic comment.
       string buildId
       # Embedder-specific auxiliary data likely matching {isDefault: boolean, type: 'default'|'isolated'|'worker', frameId: string}
       optional object executionContextAuxData

deps/v8/include/v8-callbacks.h (vendored) | 4
@@ -328,10 +328,6 @@ using WasmImportedStringsEnabledCallback = bool (*)(Local<Context> context);
 using SharedArrayBufferConstructorEnabledCallback =
     bool (*)(Local<Context> context);
 
-// --- Callback for checking if the compile hints magic comments are enabled ---
-using JavaScriptCompileHintsMagicEnabledCallback =
-    bool (*)(Local<Context> context);
-
 // --- Callback for checking if WebAssembly JSPI is enabled ---
 using WasmJSPIEnabledCallback = bool (*)(Local<Context> context);
 
56 deps/v8/include/v8-cpp-heap-external.h vendored Normal file

@@ -0,0 +1,56 @@
+// Copyright 2025 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_HEAP_EXTERNAL_H_
+#define INCLUDE_V8_HEAP_EXTERNAL_H_
+
+#include "cppgc/type-traits.h"  // NOLINT(build/include_directory)
+#include "v8-sandbox.h"         // NOLINT(build/include_directory)
+#include "v8-value.h"           // NOLINT(build/include_directory)
+#include "v8config.h"           // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Isolate;
+
+/**
+ * A JavaScript value that wraps a `cppgc::GarbageCollected<T>` object allocated
+ * on the managed C++ heap (CppHeap). This type of value is mainly used to
+ * associate C++ data structures which aren't exposed to JavaScript with
+ * JavaScript objects.
+ */
+class V8_EXPORT CppHeapExternal : public Data {
+ public:
+  template <typename T>
+  static Local<CppHeapExternal> New(Isolate* isolate, T* value,
+                                    CppHeapPointerTag tag) {
+    static_assert(cppgc::IsGarbageCollectedTypeV<T>,
+                  "Object must be of type GarbageCollected.");
+    return NewImpl(isolate, value, tag);
+  }
+
+  V8_INLINE static CppHeapExternal* Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+    CheckCast(data);
+#endif
+    return static_cast<CppHeapExternal*>(data);
+  }
+
+  template <typename T>
+  T* Value(Isolate* isolate, CppHeapPointerTagRange tag_range) const {
+    static_assert(cppgc::IsGarbageCollectedTypeV<T>,
+                  "Object must be of type GarbageCollected.");
+    return static_cast<T*>(ValueImpl(isolate, tag_range));
+  }
+
+ private:
+  static void CheckCast(v8::Data* obj);
+  static Local<CppHeapExternal> NewImpl(Isolate* isolate, void* value,
+                                        CppHeapPointerTag tag);
+  void* ValueImpl(Isolate*, CppHeapPointerTagRange tag_range) const;
+};
+
+}  // namespace v8
+
+#endif  // INCLUDE_V8_HEAP_EXTERNAL_H_
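A hedged usage sketch of this new API, under stated assumptions: BackendData is a hypothetical embedder type, a CppHeap is attached to the isolate, and kDefaultTag is used as the pointer tag (an embedder would normally reserve its own CppHeapPointerTag values):

#include "cppgc/garbage-collected.h"
#include "cppgc/visitor.h"
#include "v8-cpp-heap-external.h"

// Hypothetical embedder data living on the managed C++ heap.
class BackendData : public cppgc::GarbageCollected<BackendData> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

v8::Local<v8::CppHeapExternal> MakeExternal(v8::Isolate* isolate,
                                            BackendData* data) {
  return v8::CppHeapExternal::New<BackendData>(
      isolate, data, v8::CppHeapPointerTag::kDefaultTag);
}

BackendData* FromExternal(v8::Isolate* isolate,
                          v8::Local<v8::CppHeapExternal> ext) {
  // The tag range passed here must cover the tag used at creation time.
  return ext->Value<BackendData>(
      isolate, v8::CppHeapPointerTagRange(v8::CppHeapPointerTag::kDefaultTag,
                                          v8::CppHeapPointerTag::kDefaultTag));
}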
5 deps/v8/include/v8-data.h vendored

@@ -57,6 +57,11 @@ class V8_EXPORT Data {
    */
   bool IsContext() const;
 
+  /**
+   * Returns true if this value is a `CppHeapExternal` object.
+   */
+  bool IsCppHeapExternal() const;
+
  private:
   Data() = delete;
 };
 

14 deps/v8/include/v8-inspector.h vendored

@@ -408,12 +408,20 @@ class V8_EXPORT V8Inspector {
   enum ClientTrustLevel { kUntrusted, kFullyTrusted };
   enum SessionPauseState { kWaitingForDebugger, kNotWaitingForDebugger };
-  // TODO(chromium:1352175): remove default value once downstream change lands.
+  // Deprecated: Use `connectShared` instead.
   virtual std::unique_ptr<V8InspectorSession> connect(
       int contextGroupId, Channel*, StringView state,
       ClientTrustLevel client_trust_level,
-      SessionPauseState = kNotWaitingForDebugger) {
-    return nullptr;
-  }
+      SessionPauseState = kNotWaitingForDebugger) = 0;
+
+  // Same as `connect` but returns a std::shared_ptr instead.
+  // Embedders should not deconstruct V8 sessions while the nested run loop
+  // (V8InspectorClient::runMessageLoopOnPause) is running. To partially ensure
+  // this, we defer session deconstruction until no "dispatchProtocolMessages"
+  // remains on the stack.
+  virtual std::shared_ptr<V8InspectorSession> connectShared(
+      int contextGroupId, Channel* channel, StringView state,
+      ClientTrustLevel clientTrustLevel, SessionPauseState pauseState) = 0;
 
   // API methods.
   virtual std::unique_ptr<V8StackTrace> createStackTrace(

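A minimal sketch of an embedder call site migrating to the shared-ownership variant; `inspector` and `channel` are assumed to already exist, and the argument choices are illustrative only:

std::shared_ptr<v8_inspector::V8InspectorSession> OpenSession(
    v8_inspector::V8Inspector* inspector, int context_group_id,
    v8_inspector::V8Inspector::Channel* channel) {
  // The shared_ptr lets V8 defer destruction until no dispatch is on the
  // stack, per the comment in the header above.
  return inspector->connectShared(
      context_group_id, channel, v8_inspector::StringView(),
      v8_inspector::V8Inspector::kFullyTrusted,
      v8_inspector::V8Inspector::kNotWaitingForDebugger);
}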
58 deps/v8/include/v8-internal.h vendored

@@ -423,6 +423,19 @@ constexpr size_t kMaxCppHeapPointers = 0;
 // which all subtypes of a given supertype use contiguous tags. This struct can
 // then be used to represent such a type range.
 //
+// As an example, consider the following type hierarchy:
+//
+//          A     F
+//         / \
+//        B   E
+//       / \
+//      C   D
+//
+// A potential type id assignment for range-based type checks is
+// {A: 0, B: 1, C: 2, D: 3, E: 4, F: 5}. With that, the type check for type A
+// would check for the range [A, E], while the check for B would check range
+// [B, D], and for F it would simply check [F, F].
+//
 // In addition, there is an option for performance tweaks: if the size of the
 // type range corresponding to a supertype is a power of two and starts at a
 // power of two (e.g. [0x100, 0x13f]), then the compiler can often optimize
@@ -560,7 +573,28 @@ enum ExternalPointerTag : uint16_t {
   kFunctionTemplateInfoCallbackTag = kFirstMaybeReadOnlyExternalPointerTag,
   kAccessorInfoGetterTag,
   kAccessorInfoSetterTag,
-  kLastMaybeReadOnlyExternalPointerTag = kAccessorInfoSetterTag,
+
+  // InterceptorInfo external pointers.
+  kFirstInterceptorInfoExternalPointerTag,
+  kApiNamedPropertyQueryCallbackTag = kFirstInterceptorInfoExternalPointerTag,
+  kApiNamedPropertyGetterCallbackTag,
+  kApiNamedPropertySetterCallbackTag,
+  kApiNamedPropertyDescriptorCallbackTag,
+  kApiNamedPropertyDefinerCallbackTag,
+  kApiNamedPropertyDeleterCallbackTag,
+  kApiNamedPropertyEnumeratorCallbackTag,
+  kApiIndexedPropertyQueryCallbackTag,
+  kApiIndexedPropertyGetterCallbackTag,
+  kApiIndexedPropertySetterCallbackTag,
+  kApiIndexedPropertyDescriptorCallbackTag,
+  kApiIndexedPropertyDefinerCallbackTag,
+  kApiIndexedPropertyDeleterCallbackTag,
+  kApiIndexedPropertyEnumeratorCallbackTag,
+  kLastInterceptorInfoExternalPointerTag =
+      kApiIndexedPropertyEnumeratorCallbackTag,
+
+  kLastMaybeReadOnlyExternalPointerTag = kLastInterceptorInfoExternalPointerTag,
 
   kWasmInternalFunctionCallTargetTag,
   kWasmTypeInfoNativeTypeTag,
   kWasmExportedFunctionDataSignatureTag,
@@ -570,19 +604,7 @@ enum ExternalPointerTag : uint16_t {
   // Foreigns
   kFirstForeignExternalPointerTag,
   kGenericForeignTag = kFirstForeignExternalPointerTag,
-  kApiNamedPropertyQueryCallbackTag,
-  kApiNamedPropertyGetterCallbackTag,
-  kApiNamedPropertySetterCallbackTag,
-  kApiNamedPropertyDescriptorCallbackTag,
-  kApiNamedPropertyDefinerCallbackTag,
-  kApiNamedPropertyDeleterCallbackTag,
-  kApiIndexedPropertyQueryCallbackTag,
-  kApiIndexedPropertyGetterCallbackTag,
-  kApiIndexedPropertySetterCallbackTag,
-  kApiIndexedPropertyDescriptorCallbackTag,
-  kApiIndexedPropertyDefinerCallbackTag,
-  kApiIndexedPropertyDeleterCallbackTag,
-  kApiIndexedPropertyEnumeratorCallbackTag,
 
   kApiAccessCheckCallbackTag,
   kApiAbortScriptExecutionCallbackTag,
   kSyntheticModuleTag,
@@ -636,6 +658,9 @@ constexpr ExternalPointerTagRange kAnySharedExternalPointerTagRange(
     kFirstSharedExternalPointerTag, kLastSharedExternalPointerTag);
 constexpr ExternalPointerTagRange kAnyForeignExternalPointerTagRange(
     kFirstForeignExternalPointerTag, kLastForeignExternalPointerTag);
+constexpr ExternalPointerTagRange kAnyInterceptorInfoExternalPointerTagRange(
+    kFirstInterceptorInfoExternalPointerTag,
+    kLastInterceptorInfoExternalPointerTag);
 constexpr ExternalPointerTagRange kAnyManagedExternalPointerTagRange(
     kFirstManagedExternalPointerTag, kLastManagedExternalPointerTag);
 constexpr ExternalPointerTagRange kAnyMaybeReadOnlyExternalPointerTagRange(
@@ -678,7 +703,8 @@ V8_INLINE static constexpr bool IsManagedExternalPointerType(
 V8_INLINE static constexpr bool ExternalPointerCanBeEmpty(
     ExternalPointerTagRange tag_range) {
   return tag_range.Contains(kArrayBufferExtensionTag) ||
-         tag_range.Contains(kEmbedderDataSlotPayloadTag);
+         tag_range.Contains(kEmbedderDataSlotPayloadTag) ||
+         kAnyInterceptorInfoExternalPointerTagRange.Contains(tag_range);
 }
 
 // Indirect Pointers.
@@ -1331,7 +1357,7 @@ class BackingStoreBase {};
 
 // The maximum value in enum GarbageCollectionReason, defined in heap.h.
 // This is needed for histograms sampling garbage collection reasons.
-constexpr int kGarbageCollectionReasonMaxValue = 29;
+constexpr int kGarbageCollectionReasonMaxValue = 30;
 
 // Base class for the address block allocator compatible with standard
 // containers, which registers its allocated range as strong roots.

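The range-based type check described in the comment above boils down to one unsigned subtraction and compare. A self-contained sketch, using the illustrative tag values from the comment rather than V8's real ones:

#include <cstdint>

enum TypeId : uint16_t { A = 0, B = 1, C = 2, D = 3, E = 4, F = 5 };

// An id is an instance of a supertype iff it falls inside the supertype's
// contiguous range; the unsigned wrap makes "below first" fail the compare.
constexpr bool IsInRange(TypeId id, TypeId first, TypeId last) {
  return static_cast<uint16_t>(id - first) <=
         static_cast<uint16_t>(last - first);
}

static_assert(IsInRange(C, A, E));   // C is a subtype of A
static_assert(IsInRange(D, B, D));   // D is a subtype of B
static_assert(!IsInRange(F, A, E));  // F is unrelated to A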
31 deps/v8/include/v8-isolate.h vendored

@@ -1665,16 +1665,6 @@ class V8_EXPORT Isolate {
 
   void SetWasmJSPIEnabledCallback(WasmJSPIEnabledCallback callback);
 
-  /**
-   * Register callback to control whether compile hints magic comments are
-   * enabled.
-   */
-  V8_DEPRECATED(
-      "Will be removed, use ScriptCompiler::CompileOptions for enabling the "
-      "compile hints magic comments")
-  void SetJavaScriptCompileHintsMagicEnabledCallback(
-      JavaScriptCompileHintsMagicEnabledCallback callback);
-
   /**
    * This function can be called by the embedder to signal V8 that the dynamic
    * enabling of features has finished. V8 can now set up dynamically added
@@ -1697,7 +1687,7 @@ class V8_EXPORT Isolate {
    * If data is specified, it will be passed to the callback when it is called.
    * Otherwise, the exception object will be passed to the callback instead.
    */
-  bool AddMessageListener(MessageCallback that,
+  bool AddMessageListener(MessageCallback callback,
                           Local<Value> data = Local<Value>());
 
   /**
@@ -1711,14 +1701,14 @@ class V8_EXPORT Isolate {
    *
    * A listener can listen for particular error levels by providing a mask.
    */
-  bool AddMessageListenerWithErrorLevel(MessageCallback that,
+  bool AddMessageListenerWithErrorLevel(MessageCallback callback,
                                         int message_levels,
                                         Local<Value> data = Local<Value>());
 
   /**
    * Remove all message listeners from the specified callback function.
    */
-  void RemoveMessageListeners(MessageCallback that);
+  void RemoveMessageListeners(MessageCallback callback);
 
   /** Callback function for reporting failed access checks.*/
   void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
@@ -1789,6 +1779,21 @@ class V8_EXPORT Isolate {
    */
   std::string GetDefaultLocale();
 
+  /**
+   * Returns a canonical and case-regularized form of locale if Intl support is
+   * enabled. If the locale is not syntactically well-formed, throws a
+   * RangeError.
+   *
+   * If Intl support is not enabled, returns Nothing<std::string>().
+   *
+   * Corresponds to the combination of the abstract operations
+   * IsStructurallyValidLanguageTag and CanonicalizeUnicodeLocaleId. See:
+   * https://tc39.es/ecma402/#sec-isstructurallyvalidlanguagetag
+   * https://tc39.es/ecma402/#sec-canonicalizeunicodelocaleid
+   */
+  V8_WARN_UNUSED_RESULT Maybe<std::string>
+  ValidateAndCanonicalizeUnicodeLocaleId(std::string_view locale);
+
   /**
    * Returns the hash seed for that isolate, for testing purposes.
    */

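A hedged usage sketch of the new locale API, assuming an Intl-enabled build; the helper name and fallback behavior are illustrative:

#include <string>
#include <string_view>
#include <v8.h>

std::string CanonicalizeOrEmpty(v8::Isolate* isolate, std::string_view tag) {
  v8::Maybe<std::string> result =
      isolate->ValidateAndCanonicalizeUnicodeLocaleId(tag);
  std::string locale;
  // On failure a RangeError is pending (or Intl support is compiled out);
  // e.g. "en-latn-us" would canonicalize to "en-Latn-US" with Intl enabled.
  return result.To(&locale) ? locale : std::string();
}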
10 deps/v8/include/v8-platform.h vendored

@@ -505,6 +505,16 @@ class PageAllocator {
   virtual void* AllocatePages(void* address, size_t length, size_t alignment,
                               Permission permissions) = 0;
 
+  /**
+   * Resizes the previously allocated memory at the given address. Returns true
+   * if the allocation could be resized. Returns false if this operation is
+   * either not supported or the object could not be resized in-place.
+   */
+  virtual bool ResizeAllocationAt(void* address, size_t old_length,
+                                  size_t new_length, Permission permissions) {
+    return false;
+  }
+
   /**
    * Frees memory in a range that was allocated by a call to AllocatePages.
    */

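A minimal sketch of how an embedder allocator might opt into the new hook; ResizingPageAllocator and TryResizeInPlace are hypothetical, and the class stays abstract since the other pure-virtual members are omitted:

class ResizingPageAllocator : public v8::PageAllocator {
 public:
  bool ResizeAllocationAt(void* address, size_t old_length, size_t new_length,
                          Permission permissions) override {
    // Returning false keeps the default behavior: V8 falls back to
    // allocate-copy-free when in-place resize is unsupported.
    return TryResizeInPlace(address, old_length, new_length, permissions);
  }
  // AllocatePages, FreePages, etc. still need real implementations; omitted.

 private:
  bool TryResizeInPlace(void*, size_t, size_t, Permission) { return false; }
};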
2 deps/v8/include/v8-primitive.h vendored

@@ -819,6 +819,8 @@ class V8_EXPORT Symbol : public Name {
   static Local<Symbol> GetToPrimitive(Isolate* isolate);
   static Local<Symbol> GetToStringTag(Isolate* isolate);
   static Local<Symbol> GetUnscopables(Isolate* isolate);
+  static Local<Symbol> GetDispose(Isolate* isolate);
+  static Local<Symbol> GetAsyncDispose(Isolate* isolate);
 
   V8_INLINE static Symbol* Cast(Data* data) {
 #ifdef V8_ENABLE_CHECKS

52 deps/v8/include/v8-template.h vendored

@@ -188,7 +188,8 @@ using NamedPropertyGetterCallback = Intercepted (*)(
 // Use `info.GetReturnValue().Set()` to set the return value of the
 // intercepted get request. If the property does not exist the callback should
 // not set the result and must not produce side effects.
-using GenericNamedPropertyGetterCallback =
+using GenericNamedPropertyGetterCallback V8_DEPRECATE_SOON(
+    "Use NamedPropertyGetterCallback instead") =
     void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
 
 /**
@@ -221,7 +222,8 @@ using NamedPropertySetterCallback =
 // `info.GetReturnValue().Set(value)`. If the setter did not intercept the
 // request, i.e., if the request should be handled as if no interceptor is
 // present, do not not call `Set()` and do not produce side effects.
-using GenericNamedPropertySetterCallback =
+using GenericNamedPropertySetterCallback V8_DEPRECATE_SOON(
+    "Use NamedPropertySetterCallback instead") =
     void (*)(Local<Name> property, Local<Value> value,
              const PropertyCallbackInfo<Value>& info);
 
@@ -259,7 +261,8 @@ using NamedPropertyQueryCallback = Intercepted (*)(
 // value is an integer encoding a `v8::PropertyAttribute`. If the property does
 // not exist the callback should not set the result and must not produce side
 // effects.
-using GenericNamedPropertyQueryCallback =
+using GenericNamedPropertyQueryCallback V8_DEPRECATE_SOON(
+    "Use NamedPropertyQueryCallback instead") =
     void (*)(Local<Name> property, const PropertyCallbackInfo<Integer>& info);
 
 /**
@@ -296,7 +299,8 @@ using NamedPropertyDeleterCallback = Intercepted (*)(
 // `info.GetReturnValue().Set(value)` with a boolean `value`. The `value` is
 // used as the return value of `delete`. If the deleter does not intercept the
 // request then it should not set the result and must not produce side effects.
-using GenericNamedPropertyDeleterCallback =
+using GenericNamedPropertyDeleterCallback V8_DEPRECATE_SOON(
+    "Use NamedPropertyDeleterCallback instead") =
     void (*)(Local<Name> property, const PropertyCallbackInfo<Boolean>& info);
 
 /**
@@ -309,7 +313,9 @@ using NamedPropertyEnumeratorCallback =
     void (*)(const PropertyCallbackInfo<Array>& info);
 // This variant will be deprecated soon.
 // This is just a renaming of the typedef.
-using GenericNamedPropertyEnumeratorCallback = NamedPropertyEnumeratorCallback;
+using GenericNamedPropertyEnumeratorCallback V8_DEPRECATE_SOON(
+    "Use NamedPropertyEnumeratorCallback instead") =
+    NamedPropertyEnumeratorCallback;
 
 /**
  * Interceptor for defineProperty requests on an object.
@@ -341,7 +347,8 @@ using NamedPropertyDefinerCallback =
 // `info.GetReturnValue().Set(value)`. If the definer did not intercept the
 // request, i.e., if the request should be handled as if no interceptor is
 // present, do not not call `Set()` and do not produce side effects.
-using GenericNamedPropertyDefinerCallback =
+using GenericNamedPropertyDefinerCallback V8_DEPRECATE_SOON(
+    "Use NamedPropertyDefinerCallback instead") =
     void (*)(Local<Name> property, const PropertyDescriptor& desc,
              const PropertyCallbackInfo<Value>& info);
 
@@ -377,7 +384,8 @@ using NamedPropertyDescriptorCallback = Intercepted (*)(
 // intercepted request. The return value must be an object that
 // can be converted to a PropertyDescriptor, e.g., a `v8::Value` returned from
 // `v8::Object::getOwnPropertyDescriptor`.
-using GenericNamedPropertyDescriptorCallback =
+using GenericNamedPropertyDescriptorCallback V8_DEPRECATE_SOON(
+    "Use NamedPropertyDescriptorCallback instead") =
     void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
 
 // TODO(ishell): Rename IndexedPropertyXxxCallbackV2 back to
@@ -390,7 +398,8 @@ using GenericNamedPropertyDescriptorCallback =
 using IndexedPropertyGetterCallbackV2 =
     Intercepted (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
 // This variant will be deprecated soon.
-using IndexedPropertyGetterCallback =
+using IndexedPropertyGetterCallback V8_DEPRECATE_SOON(
+    "Use IndexedPropertyGetterCallbackV2 instead") =
     void (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
 
 /**
@@ -399,7 +408,8 @@ using IndexedPropertyGetterCallback =
 using IndexedPropertySetterCallbackV2 = Intercepted (*)(
     uint32_t index, Local<Value> value, const PropertyCallbackInfo<void>& info);
 // This variant will be deprecated soon.
-using IndexedPropertySetterCallback =
+using IndexedPropertySetterCallback V8_DEPRECATE_SOON(
+    "Use IndexedPropertySetterCallbackV2 instead") =
     void (*)(uint32_t index, Local<Value> value,
              const PropertyCallbackInfo<Value>& info);
 
@@ -409,7 +419,8 @@ using IndexedPropertySetterCallback =
 using IndexedPropertyQueryCallbackV2 =
     Intercepted (*)(uint32_t index, const PropertyCallbackInfo<Integer>& info);
 // This variant will be deprecated soon.
-using IndexedPropertyQueryCallback =
+using IndexedPropertyQueryCallback V8_DEPRECATE_SOON(
+    "Use IndexedPropertyQueryCallbackV2 instead") =
     void (*)(uint32_t index, const PropertyCallbackInfo<Integer>& info);
 
 /**
@@ -418,7 +429,8 @@ using IndexedPropertyQueryCallback =
 using IndexedPropertyDeleterCallbackV2 =
     Intercepted (*)(uint32_t index, const PropertyCallbackInfo<Boolean>& info);
 // This variant will be deprecated soon.
-using IndexedPropertyDeleterCallback =
+using IndexedPropertyDeleterCallback V8_DEPRECATE_SOON(
+    "Use IndexedPropertyDeleterCallbackV2 instead") =
    void (*)(uint32_t index, const PropertyCallbackInfo<Boolean>& info);
 
 /**
@@ -437,7 +449,8 @@ using IndexedPropertyDefinerCallbackV2 =
     Intercepted (*)(uint32_t index, const PropertyDescriptor& desc,
                     const PropertyCallbackInfo<void>& info);
 // This variant will be deprecated soon.
-using IndexedPropertyDefinerCallback =
+using IndexedPropertyDefinerCallback V8_DEPRECATE_SOON(
+    "Use IndexedPropertyDefinerCallbackV2 instead") =
     void (*)(uint32_t index, const PropertyDescriptor& desc,
              const PropertyCallbackInfo<Value>& info);
 
@@ -447,7 +460,8 @@ using IndexedPropertyDefinerCallback =
 using IndexedPropertyDescriptorCallbackV2 =
     Intercepted (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
 // This variant will be deprecated soon.
-using IndexedPropertyDescriptorCallback =
+using IndexedPropertyDescriptorCallback V8_DEPRECATE_SOON(
+    "Use IndexedPropertyDescriptorCallbackV2 instead") =
     void (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
 
 /**
@@ -702,8 +716,8 @@ class V8_EXPORT FunctionTemplate : public Template {
   bool IsLeafTemplateForApiObject(v8::Local<v8::Value> value) const;
 
   /**
-   * Checks if the object can be promoted to read only space, seals it and
-   * prepares for promotion.
+   * Seal the object and mark it for promotion to read only space during
+   * context snapshot creation.
    *
    * This is an experimental feature and may still change significantly.
    */
@@ -1037,6 +1051,14 @@ class V8_EXPORT ObjectTemplate : public Template {
   void SetCodeLike();
   bool IsCodeLike() const;
 
+  /**
+   * Seal the object and mark it for promotion to read only space during
+   * context snapshot creation.
+   *
+   * This is an experimental feature and may still change significantly.
+   */
+  void SealAndPrepareForPromotionToReadOnly();
+
   V8_INLINE static ObjectTemplate* Cast(Data* data);
 
  private:

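For reference, a hedged sketch of the non-deprecated callback shape these aliases point to: the callback returns v8::Intercepted to say whether it handled the request. MyNamedGetter and the constant it returns are illustrative:

v8::Intercepted MyNamedGetter(v8::Local<v8::Name> property,
                              const v8::PropertyCallbackInfo<v8::Value>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  // Decline symbol keys: the lookup proceeds as if no interceptor existed.
  if (property->IsSymbol()) return v8::Intercepted::kNo;
  info.GetReturnValue().Set(v8::Number::New(isolate, 42));
  return v8::Intercepted::kYes;
}

// Installed via an ObjectTemplate, e.g.:
//   templ->SetHandler(v8::NamedPropertyHandlerConfiguration(MyNamedGetter));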
6 deps/v8/include/v8-version.h vendored

@@ -9,9 +9,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define V8_MAJOR_VERSION 13
-#define V8_MINOR_VERSION 6
-#define V8_BUILD_NUMBER 233
-#define V8_PATCH_LEVEL 10
+#define V8_MINOR_VERSION 7
+#define V8_BUILD_NUMBER 152
+#define V8_PATCH_LEVEL 9
 
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)

4 deps/v8/include/v8config.h vendored

@@ -592,15 +592,11 @@ path. Add it with -I<path> to the command line
 // functions.
 // Use like:
 //   V8_NOINLINE V8_PRESERVE_MOST void UnlikelyMethod();
-#if V8_OS_WIN
-# define V8_PRESERVE_MOST
-#else
 #if V8_HAS_ATTRIBUTE_PRESERVE_MOST
 # define V8_PRESERVE_MOST __attribute__((preserve_most))
 #else
 # define V8_PRESERVE_MOST /* NOT SUPPORTED */
 #endif
-#endif
 
 
 // A macro (V8_DEPRECATED) to mark classes or functions as deprecated.

2 deps/v8/infra/mb/mb_config.pyl vendored

@@ -853,7 +853,7 @@
     },
 
     'conservative_stack_scanning': {
-      'gn_args': 'v8_enable_conservative_stack_scanning=true',
+      'gn_args': 'v8_enable_direct_handle=true',
     },
 
     'dcheck_always_on': {

4 deps/v8/infra/testing/builders.pyl vendored

@@ -109,7 +109,7 @@
       'os': 'Ubuntu-22.04',
     },
     'tests': [
-      {'name': 'v8testing', 'variant': 'default', 'shards': 6},
+      {'name': 'v8testing', 'variant': 'default', 'shards': 8},
    ],
   },
   'v8_linux_gc_stress_dbg': {
@@ -1375,7 +1375,7 @@
       'os': 'Ubuntu-22.04',
     },
     'tests': [
-      {'name': 'v8testing', 'variant': 'default', 'shards': 7},
+      {'name': 'v8testing', 'variant': 'default', 'shards': 8},
     ],
   },
   'V8 Linux - gc stress': {

1 deps/v8/infra/whitespace.txt vendored Normal file

@@ -0,0 +1 @@
+Some infra-owned whitespace to test the cherry-picker.

78 deps/v8/src/api/api-arguments-inl.h vendored

@@ -163,9 +163,8 @@ DirectHandle<Object> PropertyCallbackArguments::CallNamedQuery(
   RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedQueryCallback);
   slot_at(kPropertyKeyIndex).store(*name);
   slot_at(kReturnValueIndex).store(Smi::FromInt(v8::None));
-  NamedPropertyQueryCallback f =
-      ToCData<NamedPropertyQueryCallback, kApiNamedPropertyQueryCallbackTag>(
-          isolate, interceptor->query());
+  NamedPropertyQueryCallback f = reinterpret_cast<NamedPropertyQueryCallback>(
+      interceptor->named_query(isolate));
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Integer, interceptor,
                                     ExceptionContext::kNamedQuery);
   v8::Intercepted intercepted = f(v8::Utils::ToLocal(name), callback_info);
@@ -180,9 +179,8 @@ DirectHandle<JSAny> PropertyCallbackArguments::CallNamedGetter(
   RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedGetterCallback);
   slot_at(kPropertyKeyIndex).store(*name);
   slot_at(kReturnValueIndex).store(ReadOnlyRoots(isolate).undefined_value());
-  NamedPropertyGetterCallback f =
-      ToCData<NamedPropertyGetterCallback, kApiNamedPropertyGetterCallbackTag>(
-          isolate, interceptor->getter());
+  NamedPropertyGetterCallback f = reinterpret_cast<NamedPropertyGetterCallback>(
+      interceptor->named_getter(isolate));
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor,
                                     ExceptionContext::kNamedGetter);
   v8::Intercepted intercepted = f(v8::Utils::ToLocal(name), callback_info);
@@ -198,9 +196,8 @@ Handle<JSAny> PropertyCallbackArguments::CallNamedDescriptor(
   slot_at(kPropertyKeyIndex).store(*name);
   slot_at(kReturnValueIndex).store(ReadOnlyRoots(isolate).undefined_value());
   NamedPropertyDescriptorCallback f =
-      ToCData<NamedPropertyDescriptorCallback,
-              kApiNamedPropertyDescriptorCallbackTag>(
-          isolate, interceptor->descriptor());
+      reinterpret_cast<NamedPropertyDescriptorCallback>(
+          interceptor->named_descriptor(isolate));
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor,
                                     ExceptionContext::kNamedDescriptor);
   v8::Intercepted intercepted = f(v8::Utils::ToLocal(name), callback_info);
@@ -216,9 +213,8 @@ v8::Intercepted PropertyCallbackArguments::CallNamedSetter(
   RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedSetterCallback);
   slot_at(kPropertyKeyIndex).store(*name);
   slot_at(kReturnValueIndex).store(ReadOnlyRoots(isolate).true_value());
-  NamedPropertySetterCallback f =
-      ToCData<NamedPropertySetterCallback, kApiNamedPropertySetterCallbackTag>(
-          isolate, interceptor->setter());
+  NamedPropertySetterCallback f = reinterpret_cast<NamedPropertySetterCallback>(
+      interceptor->named_setter(isolate));
   DirectHandle<InterceptorInfo> has_side_effects;
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, void, has_side_effects,
                                     ExceptionContext::kNamedSetter);
@@ -235,9 +231,9 @@ v8::Intercepted PropertyCallbackArguments::CallNamedDefiner(
   RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedDefinerCallback);
   slot_at(kPropertyKeyIndex).store(*name);
   slot_at(kReturnValueIndex).store(ReadOnlyRoots(isolate).true_value());
-  NamedPropertyDefinerCallback f = ToCData<NamedPropertyDefinerCallback,
-                                           kApiNamedPropertyDefinerCallbackTag>(
-      isolate, interceptor->definer());
+  NamedPropertyDefinerCallback f =
+      reinterpret_cast<NamedPropertyDefinerCallback>(
+          interceptor->named_definer(isolate));
   DirectHandle<InterceptorInfo> has_side_effects;
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, void, has_side_effects,
                                     ExceptionContext::kNamedDefiner);
@@ -253,9 +249,9 @@ v8::Intercepted PropertyCallbackArguments::CallNamedDeleter(
   RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedDeleterCallback);
   slot_at(kPropertyKeyIndex).store(*name);
   slot_at(kReturnValueIndex).store(ReadOnlyRoots(isolate).true_value());
-  NamedPropertyDeleterCallback f = ToCData<NamedPropertyDeleterCallback,
-                                           kApiNamedPropertyDeleterCallbackTag>(
-      isolate, interceptor->deleter());
+  NamedPropertyDeleterCallback f =
+      reinterpret_cast<NamedPropertyDeleterCallback>(
+          interceptor->named_deleter(isolate));
   DirectHandle<InterceptorInfo> has_side_effects;
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Boolean, has_side_effects,
                                     ExceptionContext::kNamedDeleter);
@@ -284,9 +280,8 @@ DirectHandle<Object> PropertyCallbackArguments::CallIndexedQuery(
   slot_at(kPropertyKeyIndex).store(Smi::zero());  // indexed callback marker
   slot_at(kReturnValueIndex).store(Smi::FromInt(v8::None));
   IndexedPropertyQueryCallbackV2 f =
-      ToCData<IndexedPropertyQueryCallbackV2,
-              kApiIndexedPropertyQueryCallbackTag>(isolate,
-                                                   interceptor->query());
+      reinterpret_cast<IndexedPropertyQueryCallbackV2>(
+          interceptor->indexed_query(isolate));
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Integer, interceptor,
                                     ExceptionContext::kIndexedQuery);
   v8::Intercepted intercepted = f(index, callback_info);
@@ -303,9 +298,8 @@ DirectHandle<JSAny> PropertyCallbackArguments::CallIndexedGetter(
   slot_at(kPropertyKeyIndex).store(Smi::zero());  // indexed callback marker
   slot_at(kReturnValueIndex).store(ReadOnlyRoots(isolate).undefined_value());
   IndexedPropertyGetterCallbackV2 f =
-      ToCData<IndexedPropertyGetterCallbackV2,
-              kApiIndexedPropertyGetterCallbackTag>(isolate,
-                                                    interceptor->getter());
+      reinterpret_cast<IndexedPropertyGetterCallbackV2>(
+          interceptor->indexed_getter(isolate));
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor,
                                     ExceptionContext::kIndexedGetter);
   v8::Intercepted intercepted = f(index, callback_info);
@@ -322,9 +316,8 @@ Handle<JSAny> PropertyCallbackArguments::CallIndexedDescriptor(
   slot_at(kPropertyKeyIndex).store(Smi::zero());  // indexed callback marker
   slot_at(kReturnValueIndex).store(ReadOnlyRoots(isolate).undefined_value());
   IndexedPropertyDescriptorCallbackV2 f =
-      ToCData<IndexedPropertyDescriptorCallbackV2,
-              kApiIndexedPropertyDescriptorCallbackTag>(
-          isolate, interceptor->descriptor());
+      reinterpret_cast<IndexedPropertyDescriptorCallbackV2>(
+          interceptor->indexed_descriptor(isolate));
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Value, interceptor,
                                     ExceptionContext::kIndexedDescriptor);
   v8::Intercepted intercepted = f(index, callback_info);
@@ -342,9 +335,8 @@ v8::Intercepted PropertyCallbackArguments::CallIndexedSetter(
   slot_at(kPropertyKeyIndex).store(Smi::zero());  // indexed callback marker
   slot_at(kReturnValueIndex).store(ReadOnlyRoots(isolate).true_value());
   IndexedPropertySetterCallbackV2 f =
-      ToCData<IndexedPropertySetterCallbackV2,
-              kApiIndexedPropertySetterCallbackTag>(isolate,
-                                                    interceptor->setter());
+      reinterpret_cast<IndexedPropertySetterCallbackV2>(
+          interceptor->indexed_setter(isolate));
   DirectHandle<InterceptorInfo> has_side_effects;
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, void, has_side_effects,
                                     ExceptionContext::kIndexedSetter);
@@ -363,9 +355,8 @@ v8::Intercepted PropertyCallbackArguments::CallIndexedDefiner(
   slot_at(kPropertyKeyIndex).store(Smi::zero());  // indexed callback marker
   slot_at(kReturnValueIndex).store(ReadOnlyRoots(isolate).true_value());
   IndexedPropertyDefinerCallbackV2 f =
-      ToCData<IndexedPropertyDefinerCallbackV2,
-              kApiIndexedPropertyDefinerCallbackTag>(isolate,
-                                                     interceptor->definer());
+      reinterpret_cast<IndexedPropertyDefinerCallbackV2>(
+          interceptor->indexed_definer(isolate));
   DirectHandle<InterceptorInfo> has_side_effects;
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, void, has_side_effects,
                                     ExceptionContext::kIndexedDefiner);
@@ -382,9 +373,8 @@ v8::Intercepted PropertyCallbackArguments::CallIndexedDeleter(
   slot_at(kPropertyKeyIndex).store(Smi::zero());  // indexed callback marker
   slot_at(kReturnValueIndex).store(ReadOnlyRoots(isolate).true_value());
   IndexedPropertyDeleterCallbackV2 f =
-      ToCData<IndexedPropertyDeleterCallbackV2,
-              kApiIndexedPropertyDeleterCallbackTag>(isolate,
-                                                     interceptor->deleter());
+      reinterpret_cast<IndexedPropertyDeleterCallbackV2>(
+          interceptor->indexed_deleter(isolate));
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Boolean, interceptor,
                                     ExceptionContext::kIndexedDeleter);
   v8::Intercepted intercepted = f(index, callback_info);
@@ -395,8 +385,8 @@ DirectHandle<JSObjectOrUndefined>
 PropertyCallbackArguments::CallPropertyEnumerator(
     DirectHandle<InterceptorInfo> interceptor) {
   // Named and indexed enumerator callbacks have same signatures.
-  static_assert(std::is_same<NamedPropertyEnumeratorCallback,
-                             IndexedPropertyEnumeratorCallback>::value);
+  static_assert(std::is_same_v<NamedPropertyEnumeratorCallback,
+                               IndexedPropertyEnumeratorCallback>);
   Isolate* isolate = this->isolate();
   slot_at(kPropertyKeyIndex).store(Smi::zero());  // not relevant
   // Enumerator callback's return value is initialized with undefined even
@@ -404,10 +394,14 @@ PropertyCallbackArguments::CallPropertyEnumerator(
   slot_at(kReturnValueIndex).store(ReadOnlyRoots(isolate).undefined_value());
   // TODO(ishell): consider making it return v8::Intercepted to indicate
   // whether the result was set or not.
-  IndexedPropertyEnumeratorCallback f =
-      v8::ToCData<IndexedPropertyEnumeratorCallback,
-                  kApiIndexedPropertyEnumeratorCallbackTag>(
-          isolate, interceptor->enumerator());
+  IndexedPropertyEnumeratorCallback f;
+  if (interceptor->is_named()) {
+    f = reinterpret_cast<NamedPropertyEnumeratorCallback>(
+        interceptor->named_enumerator(isolate));
+  } else {
+    f = reinterpret_cast<IndexedPropertyEnumeratorCallback>(
+        interceptor->indexed_enumerator(isolate));
+  }
   PREPARE_CALLBACK_INFO_INTERCEPTOR(isolate, f, v8::Array, interceptor,
                                     ExceptionContext::kNamedEnumerator);
   f(callback_info);

6 deps/v8/src/api/api-natives.cc vendored

@@ -83,7 +83,7 @@ MaybeDirectHandle<Object> DefineAccessorProperty(
         isolate, getter,
         InstantiateFunction(isolate, Cast<FunctionTemplateInfo>(getter)));
     DirectHandle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
-    Cast<JSFunction>(getter)->UpdateCode(*trampoline);
+    Cast<JSFunction>(getter)->UpdateCode(isolate, *trampoline);
   }
   if (IsFunctionTemplateInfo(*setter) &&
       Cast<FunctionTemplateInfo>(*setter)->BreakAtEntry(isolate)) {
@@ -91,7 +91,7 @@ MaybeDirectHandle<Object> DefineAccessorProperty(
         isolate, setter,
         InstantiateFunction(isolate, Cast<FunctionTemplateInfo>(setter)));
     DirectHandle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
-    Cast<JSFunction>(setter)->UpdateCode(*trampoline);
+    Cast<JSFunction>(setter)->UpdateCode(isolate, *trampoline);
   }
   RETURN_ON_EXCEPTION(isolate, JSObject::DefineOwnAccessorIgnoreAttributes(
                                    object, name, getter, setter, attributes));
@@ -330,7 +330,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
 
   const auto new_js_object_type =
       constructor->has_initial_map() &&
-              IsJSApiWrapperObject(constructor->initial_map())
+              IsJSApiWrapperObjectMap(constructor->initial_map())
           ? NewJSObjectType::kAPIWrapper
           : NewJSObjectType::kNoAPIWrapper;
   Handle<JSObject> object;

186 deps/v8/src/api/api.cc vendored

@@ -36,10 +36,10 @@
 #include "src/api/api-natives.h"
 #include "src/base/hashing.h"
 #include "src/base/logging.h"
+#include "src/base/numerics/safe_conversions.h"
 #include "src/base/platform/memory.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/time.h"
-#include "src/base/safe_conversions.h"
 #include "src/base/utils/random-number-generator.h"
 #include "src/base/vector.h"
 #include "src/builtins/accessors.h"
@@ -86,6 +86,7 @@
 #include "src/objects/api-callbacks.h"
 #include "src/objects/backing-store.h"
 #include "src/objects/contexts.h"
+#include "src/objects/cpp-heap-object-wrapper-inl.h"
 #include "src/objects/embedder-data-array-inl.h"
 #include "src/objects/embedder-data-slot-inl.h"
 #include "src/objects/hash-table-inl.h"
@@ -153,6 +154,10 @@
 #include "src/wasm/wasm-serialization.h"
 #endif  // V8_ENABLE_WEBASSEMBLY
 
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/intl-objects.h"
+#endif  // V8_INTL_SUPPORT
+
 #if V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD
 #include <signal.h>
 #include <unistd.h>
@@ -815,6 +820,10 @@ bool Data::IsContext() const {
   return i::IsContext(*Utils::OpenDirectHandle(this));
 }
 
+bool Data::IsCppHeapExternal() const {
+  return IsCppHeapExternalObject(*Utils::OpenDirectHandle(this));
+}
+
 void Context::Enter() {
   i::DisallowGarbageCollection no_gc;
   i::Tagged<i::NativeContext> env = *Utils::OpenDirectHandle(this);
@@ -1470,52 +1479,26 @@ template <PropertyType property_type, typename Getter, typename Setter,
           typename Enumerator, typename Definer>
 i::DirectHandle<i::InterceptorInfo> CreateInterceptorInfo(
     i::Isolate* i_isolate, Getter getter, Setter setter, Query query,
-    Descriptor descriptor, Deleter remover, Enumerator enumerator,
+    Descriptor descriptor, Deleter deleter, Enumerator enumerator,
     Definer definer, Local<Value> data,
     base::Flags<PropertyHandlerFlags> flags) {
-  // TODO(saelo): instead of an in-sandbox struct with a lot of external
-  // pointers (with different tags), consider creating an object in trusted
-  // space instead. That way, only a single reference going out of the sandbox
-  // would be required.
-  auto obj = i::Cast<i::InterceptorInfo>(i_isolate->factory()->NewStruct(
-      i::INTERCEPTOR_INFO_TYPE, i::AllocationType::kOld));
-  obj->set_flags(0);
-
-#define CALLBACK_TAG(NAME)                             \
-  property_type == PropertyType::kNamed                \
-      ? internal::kApiNamedProperty##NAME##CallbackTag \
-      : internal::kApiIndexedProperty##NAME##CallbackTag;
-
-  if (getter != nullptr) {
-    constexpr internal::ExternalPointerTag tag = CALLBACK_TAG(Getter);
-    SET_FIELD_WRAPPED(i_isolate, obj, set_getter, getter, tag);
-  }
-  if (setter != nullptr) {
-    constexpr internal::ExternalPointerTag tag = CALLBACK_TAG(Setter);
-    SET_FIELD_WRAPPED(i_isolate, obj, set_setter, setter, tag);
-  }
-  if (query != nullptr) {
-    constexpr internal::ExternalPointerTag tag = CALLBACK_TAG(Query);
-    SET_FIELD_WRAPPED(i_isolate, obj, set_query, query, tag);
-  }
-  if (descriptor != nullptr) {
-    constexpr internal::ExternalPointerTag tag = CALLBACK_TAG(Descriptor);
-    SET_FIELD_WRAPPED(i_isolate, obj, set_descriptor, descriptor, tag);
-  }
-  if (remover != nullptr) {
-    constexpr internal::ExternalPointerTag tag = CALLBACK_TAG(Deleter);
-    SET_FIELD_WRAPPED(i_isolate, obj, set_deleter, remover, tag);
-  }
-  if (enumerator != nullptr) {
-    SET_FIELD_WRAPPED(i_isolate, obj, set_enumerator, enumerator,
-                      internal::kApiIndexedPropertyEnumeratorCallbackTag);
-  }
-  if (definer != nullptr) {
-    constexpr internal::ExternalPointerTag tag = CALLBACK_TAG(Definer);
-    SET_FIELD_WRAPPED(i_isolate, obj, set_definer, definer, tag);
-  }
-
-#undef CALLBACK_TAG
+  auto obj = i_isolate->factory()->NewInterceptorInfo();
+  obj->set_is_named(property_type == PropertyType::kNamed);
+
+#define SET_CALLBACK_FIELD(Name, name)                                        \
+  if (name != nullptr) {                                                      \
+    if constexpr (property_type == PropertyType::kNamed) {                    \
+      obj->set_named_##name(i_isolate, reinterpret_cast<i::Address>(name));   \
+    } else {                                                                  \
+      obj->set_indexed_##name(i_isolate, reinterpret_cast<i::Address>(name)); \
+    }                                                                         \
+  }
+  INTERCEPTOR_INFO_CALLBACK_LIST(SET_CALLBACK_FIELD)
+#undef SET_CALLBACK_FIELD
 
   obj->set_can_intercept_symbols(
       !(flags & PropertyHandlerFlags::kOnlyInterceptStrings));
@@ -1539,7 +1522,6 @@ i::DirectHandle<i::InterceptorInfo> CreateNamedInterceptorInfo(
   auto interceptor = CreateInterceptorInfo<PropertyType::kNamed>(
       i_isolate, getter, setter, query, descriptor, remover, enumerator,
       definer, data, flags);
-  interceptor->set_is_named(true);
   return interceptor;
 }
 
@@ -1553,7 +1535,6 @@ i::DirectHandle<i::InterceptorInfo> CreateIndexedInterceptorInfo(
   auto interceptor = CreateInterceptorInfo<PropertyType::kIndexed>(
       i_isolate, getter, setter, query, descriptor, remover, enumerator,
       definer, data, flags);
-  interceptor->set_is_named(false);
   return interceptor;
 }
 
@@ -2553,8 +2534,8 @@ V8_WARN_UNUSED_RESULT MaybeLocal<Function> ScriptCompiler::CompileFunction(
   i::DirectHandle<i::JSFunction> result;
   has_exception =
       !i::Compiler::GetWrappedFunction(
-           Utils::OpenHandle(*source->source_string), context, script_details,
-           cached_data.get(), options, no_cache_reason)
+           i_isolate, Utils::OpenHandle(*source->source_string), context,
+           script_details, cached_data.get(), options, no_cache_reason)
            .ToHandle(&result);
   if (options & kConsumeCodeCache) {
     source->cached_data->rejected = cached_data->rejected();
@@ -2649,8 +2630,10 @@ i::MaybeDirectHandle<i::SharedFunctionInfo> CompileStreamedSource(
       origin.ColumnOffset(), origin.SourceMapUrl(),
       origin.GetHostDefinedOptions(), origin.Options());
   i::ScriptStreamingData* data = v8_source->impl();
+  i::IsCompiledScope is_compiled_scope;
   return i::Compiler::GetSharedFunctionInfoForStreamedScript(
-      i_isolate, str, script_details, data, &v8_source->compilation_details());
+      i_isolate, str, script_details, data, &is_compiled_scope,
+      &v8_source->compilation_details());
 }
 
 }  // namespace
@@ -4022,6 +4005,11 @@ void v8::WasmModuleObject::CheckCast(Value* that) {
                   "Value is not a WasmModuleObject");
 }
 
+void v8::CppHeapExternal::CheckCast(v8::Data* that) {
+  Utils::ApiCheck(that->IsCppHeapExternal(), "v8::CppHeapExternal::Cast",
+                  "Value is not a CppHeapExternal");
+}
+
 v8::BackingStore::~BackingStore() {
   auto i_this = reinterpret_cast<const i::BackingStore*>(this);
   i_this->~BackingStore();  // manually call internal destructor
@@ -5498,7 +5486,8 @@ Local<Value> Function::GetDebugName() const {
     return ToApiHandle<Primitive>(i_isolate->factory()->undefined_value());
   }
   auto func = i::Cast<i::JSFunction>(self);
-  i::DirectHandle<i::String> name = i::JSFunction::GetDebugName(func);
+  i::DirectHandle<i::String> name =
+      i::JSFunction::GetDebugName(i_isolate, func);
   return Utils::ToLocal(i::direct_handle(*name, i_isolate));
 }
 
@@ -6394,7 +6383,7 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
 void* v8::Object::Unwrap(v8::Isolate* isolate, i::Address wrapper_obj,
                          CppHeapPointerTagRange tag_range) {
   DCHECK_LE(tag_range.lower_bound, tag_range.upper_bound);
-  return i::JSApiWrapper(
+  return i::CppHeapObjectWrapper(
              i::Cast<i::JSObject>(i::Tagged<i::Object>(wrapper_obj)))
       .GetCppHeapWrappable(reinterpret_cast<i::Isolate*>(isolate), tag_range);
 }
@@ -6402,7 +6391,7 @@ void* v8::Object::Unwrap(v8::Isolate* isolate, i::Address wrapper_obj,
 // static
 void v8::Object::Wrap(v8::Isolate* isolate, i::Address wrapper_obj,
                       CppHeapPointerTag tag, void* wrappable) {
-  return i::JSApiWrapper(
+  return i::CppHeapObjectWrapper(
             i::Cast<i::JSObject>(i::Tagged<i::Object>(wrapper_obj)))
       .SetCppHeapWrappable(reinterpret_cast<i::Isolate*>(isolate), wrappable,
                            tag);
@@ -6904,7 +6893,8 @@ bool RequiresEmbedderSupportToFreeze(i::InstanceType obj_type) {
 
   return (i::InstanceTypeChecker::IsJSApiObject(obj_type) ||
           i::InstanceTypeChecker::IsJSExternalObject(obj_type) ||
-          i::InstanceTypeChecker::IsJSAPIObjectWithEmbedderSlots(obj_type));
+          i::InstanceTypeChecker::IsJSAPIObjectWithEmbedderSlots(obj_type) ||
+          i::InstanceTypeChecker::IsCppHeapExternalObject(obj_type));
 }
 
 bool IsJSReceiverSafeToFreeze(i::InstanceType obj_type) {
@@ -7501,6 +7491,12 @@ bool FunctionTemplate::IsLeafTemplateForApiObject(
   return self->IsLeafTemplateForApiObject(object);
 }
 
+void ObjectTemplate::SealAndPrepareForPromotionToReadOnly() {
+  auto self = Utils::OpenDirectHandle(this);
+  i::Isolate* i_isolate = self->GetIsolateChecked();
+  i::ObjectTemplateInfo::SealAndPrepareForPromotionToReadOnly(i_isolate, self);
+}
+
 void FunctionTemplate::SealAndPrepareForPromotionToReadOnly() {
   auto self = Utils::OpenDirectHandle(this);
   i::Isolate* i_isolate = self->GetIsolateChecked();
@@ -7529,6 +7525,26 @@ void* External::Value() const {
       ->value(isolate);
 }
 
+Local<CppHeapExternal> v8::CppHeapExternal::NewImpl(Isolate* v8_isolate,
+                                                    void* value,
+                                                    CppHeapPointerTag tag) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  API_RCS_SCOPE(i_isolate, CppHeapExternal, New);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+  i::DirectHandle<i::CppHeapExternalObject> external =
+      i_isolate->factory()->NewCppHeapExternal();
+  i::CppHeapObjectWrapper(*external).SetCppHeapWrappable(i_isolate, value, tag);
+  return Utils::CppHeapExternalToLocal(external);
+}
+
+void* CppHeapExternal::ValueImpl(v8::Isolate* isolate,
+                                 CppHeapPointerTagRange tag_range) const {
+  DCHECK_LE(tag_range.lower_bound, tag_range.upper_bound);
+  auto self = Utils::OpenDirectHandle(this);
+  return i::CppHeapObjectWrapper(*self).GetCppHeapWrappable(
+      reinterpret_cast<i::Isolate*>(isolate), tag_range);
+}
+
 // anonymous namespace for string creation helper functions
 namespace {
 
@@ -8906,9 +8922,9 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::Compile(
 #if V8_ENABLE_WEBASSEMBLY
   base::OwnedVector<const uint8_t> bytes = base::OwnedCopyOf(wire_bytes);
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
-  if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
-    return MaybeLocal<WasmModuleObject>();
-  }
+  // We don't check for `IsWasmCodegenAllowed` here, because this function is
+  // used for ESM integration, which in terms of security is equivalent to
+  // <script> tags rather than to {eval}.
   i::MaybeDirectHandle<i::WasmModuleObject> maybe_compiled;
   {
     i::wasm::ErrorThrower thrower(i_isolate, "WasmModuleObject::Compile()");
@@ -9231,7 +9247,7 @@ v8::MemorySpan<uint8_t> v8::ArrayBufferView::GetContents(
     v8::MemorySpan<uint8_t> storage) {
   internal::DisallowGarbageCollection no_gc;
   auto self = Utils::OpenDirectHandle(this);
-  if (self->WasDetached()) {
+  if (self->IsDetachedOrOutOfBounds()) {
     return {};
   }
   if (internal::IsJSTypedArray(*self)) {
@@ -9272,13 +9288,13 @@ bool v8::ArrayBufferView::HasBuffer() const {
 
 size_t v8::ArrayBufferView::ByteOffset() {
   auto obj = Utils::OpenDirectHandle(this);
-  return obj->WasDetached() ? 0 : obj->byte_offset();
+  return obj->IsDetachedOrOutOfBounds() ? 0 : obj->byte_offset();
 }
 
 size_t v8::ArrayBufferView::ByteLength() {
   i::DisallowGarbageCollection no_gc;
   i::Tagged<i::JSArrayBufferView> obj = *Utils::OpenDirectHandle(this);
-  if (obj->WasDetached()) {
+  if (obj->IsDetachedOrOutOfBounds()) {
     return 0;
   }
   if (i::IsJSTypedArray(obj)) {
@@ -9293,7 +9309,7 @@ size_t v8::ArrayBufferView::ByteLength() {
 size_t v8::TypedArray::Length() {
   i::DisallowGarbageCollection no_gc;
   i::Tagged<i::JSTypedArray> obj = *Utils::OpenDirectHandle(this);
-  return obj->WasDetached() ? 0 : obj->GetLength();
+  return obj->IsDetachedOrOutOfBounds() ? 0 : obj->GetLength();
 }
 
 static_assert(v8::TypedArray::kMaxByteLength == i::JSTypedArray::kMaxByteLength,
@@ -9548,7 +9564,9 @@ Local<Symbol> v8::Symbol::ForApi(Isolate* v8_isolate, Local<String> name) {
   V(Split, split)                   \
   V(ToPrimitive, to_primitive)      \
   V(ToStringTag, to_string_tag)     \
-  V(Unscopables, unscopables)
+  V(Unscopables, unscopables)       \
+  V(Dispose, dispose)               \
+  V(AsyncDispose, async_dispose)
 
 #define SYMBOL_GETTER(Name, name)                                   \
   Local<Symbol> v8::Symbol::Get##Name(Isolate* v8_isolate) {        \
@@ -10821,12 +10839,6 @@ CALLBACK_SETTER(SharedArrayBufferConstructorEnabledCallback,
                 SharedArrayBufferConstructorEnabledCallback,
                 sharedarraybuffer_constructor_enabled_callback)
 
-// TODO(42203853): Remove this after the deprecated API is removed. Right now,
-// the embedder can still set the callback, but it's never called.
-CALLBACK_SETTER(JavaScriptCompileHintsMagicEnabledCallback,
-                JavaScriptCompileHintsMagicEnabledCallback,
-                compile_hints_magic_enabled_callback)
-
 CALLBACK_SETTER(IsJSApiWrapperNativeErrorCallback,
                 IsJSApiWrapperNativeErrorCallback,
                 is_js_api_wrapper_native_error_callback)
@@ -10868,11 +10880,11 @@ bool Isolate::IsDead() {
   return i_isolate->IsDead();
 }
 
-bool Isolate::AddMessageListener(MessageCallback that, Local<Value> data) {
-  return AddMessageListenerWithErrorLevel(that, kMessageError, data);
+bool Isolate::AddMessageListener(MessageCallback callback, Local<Value> data) {
+  return AddMessageListenerWithErrorLevel(callback, kMessageError, data);
 }
 
-bool Isolate::AddMessageListenerWithErrorLevel(MessageCallback that,
+bool Isolate::AddMessageListenerWithErrorLevel(MessageCallback callback,
                                                int message_levels,
                                                Local<Value> data) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
@@ -10882,10 +10894,11 @@ bool Isolate::AddMessageListenerWithErrorLevel(MessageCallback that,
       i_isolate->factory()->message_listeners();
   i::DirectHandle<i::FixedArray> listener =
       i_isolate->factory()->NewFixedArray(3);
-  i::DirectHandle<i::Foreign> foreign =
-      i_isolate->factory()->NewForeign<internal::kMessageListenerTag>(
-          FUNCTION_ADDR(that));
-  listener->set(0, *foreign);
+
+  i::DirectHandle<i::Object> callback_obj =
+      FromCData<internal::kMessageListenerTag>(i_isolate, callback);
+
+  listener->set(0, *callback_obj);
   listener->set(1, data.IsEmpty()
                        ? i::ReadOnlyRoots(i_isolate).undefined_value()
                        : *Utils::OpenDirectHandle(*data));
@@ -10895,22 +10908,24 @@ bool Isolate::AddMessageListenerWithErrorLevel(MessageCallback that,
   return true;
 }
 
-void Isolate::RemoveMessageListeners(MessageCallback that) {
+void Isolate::RemoveMessageListeners(MessageCallback callback) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
   ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::HandleScope scope(i_isolate);
   i::DisallowGarbageCollection no_gc;
   i::Tagged<i::ArrayList> listeners = i_isolate->heap()->message_listeners();
+  i::ReadOnlyRoots roots(i_isolate);
   for (int i = 0; i < listeners->length(); i++) {
-    if (i::IsUndefined(listeners->get(i), i_isolate)) {
+    if (i::IsUndefined(listeners->get(i), roots)) {
       continue;  // skip deleted ones
     }
     i::Tagged<i::FixedArray> listener =
         i::Cast<i::FixedArray>(listeners->get(i));
-    i::Tagged<i::Foreign> callback_obj = i::Cast<i::Foreign>(listener->get(0));
-    if (callback_obj->foreign_address<internal::kMessageListenerTag>() ==
-        FUNCTION_ADDR(that)) {
-      listeners->set(i, i::ReadOnlyRoots(i_isolate).undefined_value());
+    v8::MessageCallback cur_callback =
+        v8::ToCData<v8::MessageCallback, i::kMessageListenerTag>(
+            i_isolate, listener->get(0));
+    if (cur_callback == callback) {
+      listeners->set(i, roots.undefined_value());
     }
   }
 }
@@ -10976,6 +10991,27 @@ std::string Isolate::GetDefaultLocale() {
 #endif
 }
 
+Maybe<std::string> Isolate::ValidateAndCanonicalizeUnicodeLocaleId(
+    std::string_view tag) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
+  ENTER_V8_NO_SCRIPT(i_isolate, this->GetCurrentContext(), Isolate,
+                     ValidateAndCanonicalizeUnicodeLocaleId, i::HandleScope);
+  // has_exception is unused here because when Intl support is enabled, all work
+  // is forwarded to i::Intl::ValidateAndCanonicalizeUnicodeLocaleId, which
+  // encapsulates all exception handling, and when Intl support is disabled,
+  // this method unconditionally throws.
+  USE(has_exception);
+#ifdef V8_INTL_SUPPORT
+  return i::Intl::ValidateAndCanonicalizeUnicodeLocaleId(i_isolate, tag);
+#else
+  THROW_NEW_ERROR_RETURN_VALUE(
+      i_isolate,
+      NewRangeError(i::MessageTemplate::kInvalidLanguageTag,
+                    i_isolate->factory()->NewStringFromAsciiChecked(tag)),
+      Nothing<std::string>());
+#endif
+}
+
 uint64_t Isolate::GetHashSeed() {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
   return HashSeed(i_isolate);

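The CreateInterceptorInfo rework above replaces seven hand-written if-blocks with an X-macro expanded over INTERCEPTOR_INFO_CALLBACK_LIST. A standalone sketch of that pattern, with an illustrative list and struct rather than V8's real ones:

#include <cstdio>

// One list macro enumerates the callback kinds; every call site expands it.
#define CALLBACK_LIST(V) \
  V(Getter, getter)      \
  V(Setter, setter)      \
  V(Query, query)

struct Callbacks {
  // Expand the list once to declare one field per callback kind.
#define DECLARE_FIELD(Name, name) void* name = nullptr;
  CALLBACK_LIST(DECLARE_FIELD)
#undef DECLARE_FIELD
};

int main() {
  Callbacks cbs;
  // Expand the same list again to visit every field uniformly.
#define PRINT_FIELD(Name, name) \
  std::printf(#Name " set: %d\n", cbs.name != nullptr);
  CALLBACK_LIST(PRINT_FIELD)
#undef PRINT_FIELD
}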
6 deps/v8/src/api/api.h vendored

@@ -8,6 +8,7 @@
 #include <memory>
 
 #include "include/v8-container.h"
+#include "include/v8-cpp-heap-external.h"
 #include "include/v8-external.h"
 #include "include/v8-function-callback.h"
 #include "include/v8-proxy.h"
@@ -131,6 +132,7 @@ class RegisteredExtension {
   V(FixedArrayToLocal, FixedArray, FixedArray)                          \
   V(PrimitiveArrayToLocal, FixedArray, PrimitiveArray)                  \
   V(ToLocal, ScriptOrModule, ScriptOrModule)                            \
+  V(CppHeapExternalToLocal, CppHeapExternalObject, CppHeapExternal)     \
   IF_WASM(V, ToLocal, WasmMemoryMapDescriptor, WasmMemoryMapDescriptor) \
   IF_WASM(V, ToLocal, WasmModuleObject, WasmModuleObject)
 
@@ -149,7 +151,8 @@ class RegisteredExtension {
   V(CallableToLocal)       \
   V(ToLocalPrimitive)      \
   V(FixedArrayToLocal)     \
-  V(PrimitiveArrayToLocal)
+  V(PrimitiveArrayToLocal) \
+  V(CppHeapExternalToLocal)
 
 #define OPEN_HANDLE_LIST(V)                    \
   V(Template, TemplateInfoWithProperties)      \
@@ -206,6 +209,7 @@ class RegisteredExtension {
   V(ScriptOrModule, ScriptOrModule)                            \
   V(FixedArray, FixedArray)                                    \
   V(ModuleRequest, ModuleRequest)                              \
+  V(CppHeapExternal, CppHeapExternalObject)                    \
   IF_WASM(V, WasmMemoryMapDescriptor, WasmMemoryMapDescriptor) \
   IF_WASM(V, WasmMemoryObject, WasmMemoryObject)
 

21 deps/v8/src/ast/ast.h vendored

@@ -193,18 +193,6 @@ class Statement : public AstNode {
 
 class Expression : public AstNode {
  public:
-  enum Context {
-    // Not assigned a context yet, or else will not be visited during
-    // code generation.
-    kUninitialized,
-    // Evaluated for its side effects.
-    kEffect,
-    // Evaluated for its value (and side effects).
-    kValue,
-    // Evaluated for control flow (and side effects).
-    kTest
-  };
-
   // True iff the expression is a valid reference expression.
   bool IsValidReferenceExpression() const;
 
@@ -1917,6 +1905,8 @@ class BinaryOperation final : public Expression {
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }
 
+  void UpdateRight(Expression* expr) { right_ = expr; }
+
   // Returns true if one side is a Smi literal, returning the other side's
   // sub-expression in |subexpr| and the literal Smi in |literal|.
   bool IsSmiLiteralOperation(Expression** subexpr, Tagged<Smi>* literal);
@@ -1954,6 +1944,13 @@ class NaryOperation final : public Expression {
     subsequent_.emplace_back(expr, pos);
   }
 
+  Expression* last() const {
+    return subsequent_[subsequent_.size() - 1].expression;
+  }
+  void UpdateLast(Expression* expr) {
+    subsequent_[subsequent_.size() - 1].expression = expr;
+  }
+
  private:
  friend class AstNodeFactory;
  friend Zone;

24
deps/v8/src/ast/scopes.cc
vendored
@@ -256,7 +256,7 @@ Scope::Scope(Zone* zone, ScopeType scope_type,
  // object variable (we don't store it explicitly).
  DCHECK_NOT_NULL(ast_value_factory);
  int home_object_index = scope_info->ContextSlotIndex(
      ast_value_factory->dot_home_object_string()->string());
      *ast_value_factory->dot_home_object_string()->string());
  DCHECK_IMPLIES(home_object_index >= 0,
                 scope_type == CLASS_SCOPE || scope_type == BLOCK_SCOPE);
  if (home_object_index >= 0) {

@@ -974,7 +974,7 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {

  {
    location = VariableLocation::CONTEXT;
    index = scope_info->ContextSlotIndex(name->string(), &lookup_result);
    index = scope_info->ContextSlotIndex(name_handle, &lookup_result);
    found = index >= 0;
  }

@@ -2003,9 +2003,8 @@ void Scope::Print(int n) {
    Indent(n1, "// class var");
    PrintF("%s%s:\n",
           class_scope->class_variable()->is_used() ? ", used" : ", unused",
           class_scope->should_save_class_variable_index()
               ? ", index saved"
               : ", index not saved");
           class_scope->should_save_class_variable() ? ", saved"
                                                     : ", not saved");
    PrintVar(n1, class_scope->class_variable());
  }
}

@@ -2815,6 +2814,13 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* parse_info,
      it->second->Equals(scope_info, parse_info_sfi->live_edited(),
                         &last_checked_field_index);

  std::unique_ptr<char[]> script_name_or_url;
  size_t script_name_or_url_length = 0;
  if (IsString(script->GetNameOrSourceURL())) {
    script_name_or_url = Cast<String>(script->GetNameOrSourceURL())
                             ->ToCString(&script_name_or_url_length);
  }

  std::unique_ptr<char[]> script_source;
  size_t script_source_length = 0;
  std::unique_ptr<char[]> function_source;

@@ -2875,7 +2881,11 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* parse_info,
      function_source_length,
      reinterpret_cast<Address>(function_source.get() +
                                function_source_length),

      0xcafe0006,
      script_name_or_url_length,
      reinterpret_cast<Address>(script_name_or_url.get()),
      reinterpret_cast<Address>(script_name_or_url.get() +
                                script_name_or_url_length),
      0xcafeffff,
  };

@@ -3043,7 +3053,7 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
  DisallowGarbageCollection no_gc;

  VariableLookupResult lookup_result;
  int index = scope_info_->ContextSlotIndex(name->string(), &lookup_result);
  int index = scope_info_->ContextSlotIndex(*name->string(), &lookup_result);
  if (index < 0) {
    return nullptr;
  }
16
deps/v8/src/ast/scopes.h
vendored
@@ -394,6 +394,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
    return has_await_using_declaration_;
  }

  bool has_context_cells() const {
    return v8_flags.script_context_cells && is_script_scope();
  }

  bool is_wrapped_function() const {
    DCHECK_IMPLIES(is_wrapped_function_, is_function_scope());
    return is_wrapped_function_;

@@ -506,8 +510,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
    switch (scope_type_) {
      case MODULE_SCOPE:
      case WITH_SCOPE:  // DebugEvaluateContext as well
      case SCRIPT_SCOPE:  // Side data for const tracking let.
      case REPL_MODE_SCOPE:
        return true;
      default:
        DCHECK_IMPLIES(sloppy_eval_can_extend_vars_,

@@ -1469,8 +1471,8 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
  // The inner scope may also calls eval which may results in access to
  // static private names.
  // Only maintained when the scope is parsed.
  bool should_save_class_variable_index() const {
    return should_save_class_variable_index_ ||
  bool should_save_class_variable() const {
    return should_save_class_variable_ ||
           has_explicit_static_private_methods_access_ ||
           (has_static_private_methods_ && inner_scope_calls_eval_);
  }

@@ -1479,9 +1481,7 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
  bool is_anonymous_class() const { return is_anonymous_class_; }

  // Overriden during reparsing
  void set_should_save_class_variable_index() {
    should_save_class_variable_index_ = true;
  }
  void set_should_save_class_variable() { should_save_class_variable_ = true; }

 private:
  friend class Scope;

@@ -1528,7 +1528,7 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
  bool is_anonymous_class_ : 1 = false;
  // This is only maintained during reparsing, restored from the
  // preparsed data.
  bool should_save_class_variable_index_ : 1 = false;
  bool should_save_class_variable_ : 1 = false;
};

// Iterate over the private name scope chain. The iteration proceeds from the
4
deps/v8/src/base/address-region.h
vendored
@@ -64,10 +64,6 @@ class AddressRegion {
    return address_ == other.address_ && size_ == other.size_;
  }

  bool operator!=(AddressRegion other) const {
    return address_ != other.address_ || size_ != other.size_;
  }

 private:
  Address address_ = 0;
  size_t size_ = 0;
1
deps/v8/src/base/bits-iterator.h
vendored
@@ -32,7 +32,6 @@ class BitsIterator : public iterator<std::forward_iterator_tag, int> {
  }

  bool operator==(BitsIterator other) { return bits_ == other.bits_; }
  bool operator!=(BitsIterator other) { return bits_ != other.bits_; }

 private:
  T bits_;
46
deps/v8/src/base/bounded-page-allocator.cc
vendored
@@ -112,6 +112,52 @@ bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
  return true;
}

bool BoundedPageAllocator::ResizeAllocationAt(
    void* address, size_t old_size, size_t new_size,
    PageAllocator::Permission access) {
  MutexGuard guard(&mutex_);

  const Address address_at = reinterpret_cast<Address>(address);
  DCHECK(IsAligned(old_size, commit_page_size_));
  DCHECK(IsAligned(new_size, commit_page_size_));

  if (new_size < old_size) {
    // Shrinking is not supported at the moment.
    return false;
  } else if (new_size == old_size) {
    // Nothing to do in this case.
    return true;
  }

  DCHECK_LT(old_size, new_size);

  const Address allocated_old_size = RoundUp(old_size, allocate_page_size_);
  const Address allocated_new_size = RoundUp(new_size, allocate_page_size_);

  if (allocated_old_size < allocated_new_size) {
    if (!region_allocator_.TryGrowRegion(address_at, allocated_new_size)) {
      allocation_status_ = AllocationStatus::kHintedAddressTakenOrNotFound;
      return false;
    }
  }

  if (!page_allocator_->SetPermissions(
          reinterpret_cast<void*>(address_at + old_size), new_size - old_size,
          access)) {
    if (allocated_old_size < allocated_new_size) {
      // This most likely means that we ran out of memory.
      CHECK_EQ(region_allocator_.TrimRegion(address_at, allocated_old_size),
               allocated_new_size - allocated_old_size);
    }

    allocation_status_ = AllocationStatus::kFailedToCommit;
    return false;
  }

  allocation_status_ = AllocationStatus::kSuccess;
  return true;
}

bool BoundedPageAllocator::ReserveForSharedMemoryMapping(void* ptr,
                                                         size_t size) {
  MutexGuard guard(&mutex_);
3
deps/v8/src/base/bounded-page-allocator.h
vendored
@@ -106,6 +106,9 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
  // Allocates pages at given address, returns true on success.
  bool AllocatePagesAt(Address address, size_t size, Permission access);

  bool ResizeAllocationAt(void* address, size_t old_size, size_t new_size,
                          Permission access) override;

  bool FreePages(void* address, size_t size) override;

  bool ReleasePages(void* address, size_t size, size_t new_size) override;
33
deps/v8/src/base/cpu.cc
vendored
@@ -190,17 +190,19 @@ static V8_INLINE void __cpuidex(int cpu_info[4], int info_type,
 * HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2
 */
#define HWCAP2_MTE (1 << 18)
#define HWCAP2_CSSC (1UL << 34)
#define HWCAP2_HBC (1UL << 44)
#endif  // V8_HOST_ARCH_ARM64

#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64

static std::tuple<uint32_t, uint32_t> ReadELFHWCaps() {
  uint32_t hwcap = 0;
  uint32_t hwcap2 = 0;
static std::tuple<uint64_t, uint64_t> ReadELFHWCaps() {
  uint64_t hwcap = 0;
  uint64_t hwcap2 = 0;
#if (V8_GLIBC_PREREQ(2, 16) || V8_OS_ANDROID) && defined(AT_HWCAP)
  hwcap = static_cast<uint32_t>(getauxval(AT_HWCAP));
  hwcap = static_cast<uint64_t>(getauxval(AT_HWCAP));
#if defined(AT_HWCAP2)
  hwcap2 = static_cast<uint32_t>(getauxval(AT_HWCAP2));
  hwcap2 = static_cast<uint64_t>(getauxval(AT_HWCAP2));
#endif  // AT_HWCAP2
#else
  // Read the ELF HWCAP flags by parsing /proc/self/auxv.

@@ -445,8 +447,11 @@ CPU::CPU()
      has_dot_prod_(false),
      has_lse_(false),
      has_mte_(false),
      has_sha3_(false),
      has_pmull1q_(false),
      has_fp16_(false),
      has_hbc_(false),
      has_cssc_(false),
      is_fp64_mode_(false),
      has_non_stop_time_stamp_counter_(false),
      is_running_in_vm_(false),

@@ -708,7 +713,8 @@ CPU::CPU()
  }

  // Try to extract the list of CPU features from ELF hwcaps.
  uint32_t hwcaps, hwcaps2;
  uint64_t hwcaps = 0;
  uint64_t hwcaps2 = 0;
  std::tie(hwcaps, hwcaps2) = ReadELFHWCaps();
  if (hwcaps != 0) {
    has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;

@@ -831,15 +837,18 @@ CPU::CPU()

#elif V8_OS_LINUX
  // Try to extract the list of CPU features from ELF hwcaps.
  uint32_t hwcaps, hwcaps2;
  uint64_t hwcaps, hwcaps2;
  std::tie(hwcaps, hwcaps2) = ReadELFHWCaps();
  has_cssc_ = (hwcaps2 & HWCAP2_CSSC) != 0;
  has_mte_ = (hwcaps2 & HWCAP2_MTE) != 0;
  has_hbc_ = (hwcaps2 & HWCAP2_HBC) != 0;
  if (hwcaps != 0) {
    has_jscvt_ = (hwcaps & HWCAP_JSCVT) != 0;
    has_dot_prod_ = (hwcaps & HWCAP_ASIMDDP) != 0;
    has_lse_ = (hwcaps & HWCAP_ATOMICS) != 0;
    has_pmull1q_ = (hwcaps & HWCAP_PMULL) != 0;
    has_fp16_ = (hwcaps & HWCAP_FPHP) != 0;
    has_sha3_ = (hwcaps & HWCAP_SHA3) != 0;
  } else {
    // Try to fallback to "Features" CPUInfo field
    CPUInfo cpu_info;

@@ -849,6 +858,7 @@ CPU::CPU()
    has_lse_ = HasListItem(features, "atomics");
    has_pmull1q_ = HasListItem(features, "pmull");
    has_fp16_ = HasListItem(features, "half");
    has_sha3_ = HasListItem(features, "sha3");
    delete[] features;
  }
#elif V8_OS_DARWIN

@@ -893,6 +903,14 @@ CPU::CPU()
  } else {
    has_fp16_ = fp16;
  }
  int64_t feat_sha3 = 0;
  size_t feat_sha3_size = sizeof(feat_sha3);
  if (sysctlbyname("hw.optional.arm.FEAT_SHA3", &feat_sha3, &feat_sha3_size,
                   nullptr, 0) == -1) {
    has_sha3_ = false;
  } else {
    has_sha3_ = feat_sha3;
  }
#else
  // ARM64 Macs always have JSCVT, ASIMDDP, FP16 and LSE.
  has_jscvt_ = true;

@@ -900,6 +918,7 @@ CPU::CPU()
  has_lse_ = true;
  has_pmull1q_ = true;
  has_fp16_ = true;
  has_sha3_ = true;
#endif  // V8_OS_IOS
#endif  // V8_OS_WIN
6
deps/v8/src/base/cpu.h
vendored
@@ -120,8 +120,11 @@ class V8_BASE_EXPORT CPU final {
  bool has_dot_prod() const { return has_dot_prod_; }
  bool has_lse() const { return has_lse_; }
  bool has_mte() const { return has_mte_; }
  bool has_sha3() const { return has_sha3_; }
  bool has_pmull1q() const { return has_pmull1q_; }
  bool has_fp16() const { return has_fp16_; }
  bool has_hbc() const { return has_hbc_; }
  bool has_cssc() const { return has_cssc_; }

  // mips features
  bool is_fp64_mode() const { return is_fp64_mode_; }

@@ -191,8 +194,11 @@ class V8_BASE_EXPORT CPU final {
  bool has_dot_prod_;
  bool has_lse_;
  bool has_mte_;
  bool has_sha3_;
  bool has_pmull1q_;
  bool has_fp16_;
  bool has_hbc_;
  bool has_cssc_;
  bool is_fp64_mode_;
  bool has_non_stop_time_stamp_counter_;
  bool is_running_in_vm_;
1
deps/v8/src/base/doubly-threaded-list.h
vendored
@@ -53,7 +53,6 @@ class DoublyThreadedList {
  }

  bool operator==(end_iterator) { return !DTLTraits::non_empty(curr_); }
  bool operator!=(end_iterator) { return DTLTraits::non_empty(curr_); }

 private:
  friend DoublyThreadedList;
1
deps/v8/src/base/enum-set.h
vendored
@@ -58,7 +58,6 @@ class EnumSet {
  constexpr EnumSet operator~() const { return EnumSet(~bits_); }

  constexpr bool operator==(EnumSet set) const { return bits_ == set.bits_; }
  constexpr bool operator!=(EnumSet set) const { return bits_ != set.bits_; }

  constexpr EnumSet operator|(EnumSet set) const {
    return EnumSet(bits_ | set.bits_);
3
deps/v8/src/base/flags.h
vendored
@@ -37,9 +37,6 @@ class Flags final {
  constexpr bool operator==(flag_type flag) const {
    return mask_ == static_cast<mask_type>(flag);
  }
  constexpr bool operator!=(flag_type flag) const {
    return mask_ != static_cast<mask_type>(flag);
  }

  Flags& operator&=(const Flags& flags) {
    mask_ &= flags.mask_;
59
deps/v8/src/base/hashmap-entry.h
vendored
@@ -27,7 +27,7 @@ struct TemplateHashMapEntry {

  Key key;
  Value value;
  uint32_t hash;  // The full hash value for key
  uint32_t hash : 31;  // The full hash value for key

  TemplateHashMapEntry(Key key, Value value, uint32_t hash)
      : key(key), value(value), hash(hash), exists_(true) {}

@@ -37,41 +37,7 @@ struct TemplateHashMapEntry {
  void clear() { exists_ = false; }

 private:
  bool exists_;
};

// Specialization for pointer-valued keys
template <typename Key, typename Value>
struct TemplateHashMapEntry<Key*, Value> {
  static_assert((!std::is_same<Value, NoHashMapValue>::value));

  Key* key;
  Value value;
  uint32_t hash;  // The full hash value for key

  TemplateHashMapEntry(Key* key, Value value, uint32_t hash)
      : key(key), value(value), hash(hash) {}

  bool exists() const { return key != nullptr; }

  void clear() { key = nullptr; }
};

// Specialization for Address-valued keys
template <typename Value>
struct TemplateHashMapEntry<Address, Value> {
  static_assert((!std::is_same<Value, NoHashMapValue>::value));

  Address key;
  Value value;
  uint32_t hash;  // The full hash value for key

  TemplateHashMapEntry(Address key, Value value, uint32_t hash)
      : key(key), value(value), hash(hash) {}

  bool exists() const { return key != -1u; }

  void clear() { key = -1u; }
  bool exists_ : 1;
};

// Specialization for no value.

@@ -81,7 +47,7 @@ struct TemplateHashMapEntry<Key, NoHashMapValue> {
    Key key;
    NoHashMapValue value;  // Value in union with key to not take up space.
  };
  uint32_t hash;  // The full hash value for key
  uint32_t hash : 31;  // The full hash value for key

  TemplateHashMapEntry(Key key, NoHashMapValue value, uint32_t hash)
      : key(key), hash(hash), exists_(true) {}

@@ -91,24 +57,7 @@ struct TemplateHashMapEntry<Key, NoHashMapValue> {
  void clear() { exists_ = false; }

 private:
  bool exists_;
};

// Specialization for pointer-valued keys and no value.
template <typename Key>
struct TemplateHashMapEntry<Key*, NoHashMapValue> {
  union {
    Key* key;
    NoHashMapValue value;  // Value in union with key to not take up space.
  };
  uint32_t hash;  // The full hash value for key

  TemplateHashMapEntry(Key* key, NoHashMapValue value, uint32_t hash)
      : key(key), hash(hash) {}

  bool exists() const { return key != nullptr; }

  void clear() { key = nullptr; }
  bool exists_ : 1;
};

}  // namespace base
7
deps/v8/src/base/hashmap.h
vendored
@@ -326,9 +326,7 @@ template <typename Key, typename Value, typename MatchFun,
          class AllocationPolicy>
void TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Clear() {
  // Mark all entries as empty.
  for (size_t i = 0; i < capacity(); ++i) {
    impl_.map_[i].clear();
  }
  memset(impl_.map_, 0, capacity() * sizeof(Entry));
  impl_.occupancy_ = 0;
}

@@ -360,6 +358,7 @@ template <typename LookupKey>
typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Probe(
    const LookupKey& key, uint32_t hash) const {
  hash &= 0x7FFFFFFF;
  DCHECK(base::bits::IsPowerOfTwo(capacity()));
  size_t i = hash & (capacity() - 1);
  DCHECK(i < capacity());

@@ -545,7 +544,7 @@ class TemplateHashMap
  }

  value_type* operator->() { return reinterpret_cast<value_type*>(entry_); }
  bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
  bool operator==(const Iterator& other) { return entry_ == other.entry_; }

 private:
  Iterator(const Base* map, typename Base::Entry* entry)
3
deps/v8/src/base/iterator.h
vendored
@@ -81,9 +81,6 @@ struct DerefPtrIterator : base::iterator<std::bidirectional_iterator_tag, T> {
    --ptr;
    return *this;
  }
  bool operator!=(const DerefPtrIterator& other) const {
    return ptr != other.ptr;
  }
  bool operator==(const DerefPtrIterator& other) const {
    return ptr == other.ptr;
  }
2
deps/v8/src/base/numerics/.clang-tidy
vendored
Normal file
@@ -0,0 +1,2 @@
InheritParentConfig: true
Checks: misc-include-cleaner
14
deps/v8/src/base/numerics/DEPS
vendored
Normal file
@@ -0,0 +1,14 @@
# This is a dependency-free, header-only, library, and it needs to stay that
# way to facilitate pulling it into various third-party projects. So, this
# file is here to protect against accidentally introducing dependencies.
include_rules = [
  "-src",
  "+src/base/numerics",
  "+build/build_config.h",
]

specific_include_rules = {
  "byte_conversions_unittest.cc": [
    "+testing/gtest/include/gtest/gtest.h",
  ],
}
6
deps/v8/src/base/numerics/DIR_METADATA
vendored
Normal file
@@ -0,0 +1,6 @@
monorail: {
  component: "Internals"
}
buganizer_public: {
  component_id: 1456292
}
451
deps/v8/src/base/numerics/README.md
vendored
Normal file
@@ -0,0 +1,451 @@
# `base/numerics`

This directory contains a dependency-free, header-only library of templates
providing well-defined semantics for safely and performantly handling a variety
of numeric operations, including most common arithmetic operations and
conversions.

The public API is broken out into the following header files:

* `checked_math.h` contains the `CheckedNumeric` template class and helper
  functions for performing arithmetic and conversion operations that detect
  errors and boundary conditions (e.g. overflow, truncation, etc.).
* `clamped_math.h` contains the `ClampedNumeric` template class and
  helper functions for performing fast, clamped (i.e. [non-sticky](#notsticky)
  saturating) arithmetic operations and conversions.
* `safe_conversions.h` contains the `StrictNumeric` template class and
  a collection of custom casting templates and helper functions for safely
  converting between a range of numeric types.
* `safe_math.h` includes all of the previously mentioned headers.

*** aside
**Note:** The `Numeric` template types implicitly convert from C numeric types
and `Numeric` templates that are convertible to an underlying C numeric type.
The conversion priority for `Numeric` type coercions is:

* `StrictNumeric` coerces to `ClampedNumeric` and `CheckedNumeric`
* `ClampedNumeric` coerces to `CheckedNumeric`
***

[TOC]
## Common patterns and use-cases

The following covers the preferred style for the most common uses of this
library. Please don't cargo-cult from anywhere else. 😉

### Performing checked arithmetic type conversions

The `checked_cast` template converts between arbitrary arithmetic types, and is
used for cases where a conversion failure should result in program termination:

```cpp
// Crash if signed_value is out of range for buff_size.
size_t buff_size = checked_cast<size_t>(signed_value);
```
### Performing saturated (clamped) arithmetic type conversions

The `saturated_cast` template converts between arbitrary arithmetic types, and
is used in cases where an out-of-bounds source value should be saturated to the
corresponding maximum or minimum of the destination type:

```cpp
// Cast to a smaller type, saturating as needed.
int8_t eight_bit_value = saturated_cast<int8_t>(int_value);

// Convert from float with saturation to INT_MAX, INT_MIN, or 0 for NaN.
int int_value = saturated_cast<int>(floating_point_value);
```

`ClampCeil`, `ClampFloor`, and `ClampRound` provide similar functionality to the
versions in `std::`, but saturate and return an integral type. An optional
template parameter specifies the desired destination type (`int` if
unspecified). These should be used for most floating-to-integral conversions.

```cpp
// Basically saturated_cast<int>(std::round(floating_point_value)).
int int_value = ClampRound(floating_point_value);

// A destination type can be explicitly specified.
uint8_t byte_value = ClampFloor<uint8_t>(floating_point_value);
```
### Enforcing arithmetic type conversions at compile-time

The `strict_cast` emits code that is identical to `static_cast`. However, it
provides static checks that will cause a compilation failure if the
destination type cannot represent the full range of the source type:

```cpp
// Throw a compiler error if byte_value is changed to an out-of-range-type.
int int_value = strict_cast<int>(byte_value);
```

You can also enforce these compile-time restrictions on function parameters by
using the `StrictNumeric` template:

```cpp
// Throw a compiler error if the size argument cannot be represented by a
// size_t (e.g. passing an int will fail to compile).
bool AllocateBuffer(void** buffer, StrictNumeric<size_t> size);
```
### Comparing values between arbitrary arithmetic types

Both the `StrictNumeric` and `ClampedNumeric` types provide well defined
comparisons between arbitrary arithmetic types. This allows you to perform
comparisons that are not legal or would trigger compiler warnings or errors
under the normal arithmetic promotion rules:

```cpp
bool foo(unsigned value, int upper_bound) {
  // Converting to StrictNumeric allows this comparison to work correctly.
  if (MakeStrictNum(value) >= upper_bound)
    return false;
```

*** note
**Warning:** Do not perform manual conversions using the comparison operators.
Instead, use the cast templates described in the previous sections, or the
constexpr template functions `IsValueInRangeForNumericType` and
`IsTypeInRangeForNumericType`, as these templates properly handle the full range
of corner cases and employ various optimizations.
***
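For instance, a guarded manual conversion with these helpers might look like
the sketch below (`incoming_offset` is an illustrative variable, not part of
the library):

```cpp
// Hypothetical example: incoming_offset is an int64_t from untrusted input.
if (IsValueInRangeForNumericType<size_t>(incoming_offset)) {
  size_t offset = static_cast<size_t>(incoming_offset);  // Known to fit.
  // ... use offset ...
}

// Compile-time variant: does int cover the whole range of uint8_t?
static_assert(IsTypeInRangeForNumericType<int, uint8_t>(),
              "int covers uint8_t");
```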
### Calculating a buffer size (checked arithmetic)

When making exact calculations—such as for buffer lengths—it's often necessary
to know when those calculations trigger an overflow, undefined behavior, or
other boundary conditions. The `CheckedNumeric` template does this by storing
a bit determining whether or not some arithmetic operation has occurred that
would put the variable in an "invalid" state. Attempting to extract the value
from a variable in an invalid state will trigger a check/trap condition that
by default will result in process termination.

Here's an example of a buffer calculation using a `CheckedNumeric` type (note:
the AssignIfValid method will trigger a compile error if the result is ignored).

```cpp
// Calculate the buffer size and detect if an overflow occurs.
size_t size;
if (!CheckAdd(kHeaderSize, CheckMul(count, kItemSize)).AssignIfValid(&size)) {
  // Handle an overflow error...
}
```
### Calculating clamped coordinates (non-sticky saturating arithmetic)

Certain classes of calculations—such as coordinate calculations—require
well-defined semantics that always produce a valid result on boundary
conditions. The `ClampedNumeric` template addresses this by providing
performant, non-sticky saturating arithmetic operations.

Here's an example of using a `ClampedNumeric` to calculate an operation
insetting a rectangle.

```cpp
// Use clamped arithmetic since inset calculations might overflow.
void Rect::Inset(int left, int top, int right, int bottom) {
  origin_ += Vector2d(left, top);
  set_width(ClampSub(width(), ClampAdd(left, right)));
  set_height(ClampSub(height(), ClampAdd(top, bottom)));
}
```

*** note
<a name="notsticky"></a>
The `ClampedNumeric` type is not "sticky", which means the saturation is not
retained across individual operations. As such, one arithmetic operation may
result in a saturated value, while the next operation may then "desaturate"
the value. Here's an example:

```cpp
ClampedNumeric<int> value = INT_MAX;
++value;  // value is still INT_MAX, due to saturation.
--value;  // value is now (INT_MAX - 1), because saturation is not sticky.
```

***
## Conversion functions and StrictNumeric<> in safe_conversions.h

This header includes a collection of helper `constexpr` templates for safely
performing a range of conversions, assignments, and tests.

### Safe casting templates

* `as_signed()` - Returns the supplied integral value as a signed type of
  the same width.
* `as_unsigned()` - Returns the supplied integral value as an unsigned type
  of the same width.
* `checked_cast<>()` - Analogous to `static_cast<>` for numeric types, except
  that by default it will trigger a crash on an out-of-bounds conversion (e.g.
  overflow, underflow, NaN to integral) or a compile error if the conversion
  error can be detected at compile time. The crash handler can be overridden
  to perform a behavior other than crashing.
* `saturated_cast<>()` - Analogous to `static_cast` for numeric types, except
  that it returns a saturated result when the specified numeric conversion
  would otherwise overflow or underflow. A NaN source returns 0 by
  default, but can be overridden to return a different result.
* `strict_cast<>()` - Analogous to `static_cast` for numeric types, except
  this causes a compile failure if the destination type is not large
  enough to contain any value in the source type. It performs no runtime
  checking and thus introduces no runtime overhead.
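A brief sketch of the width-preserving casts (the variable names here are
illustrative only):

```cpp
uint32_t u = 0x80000000u;
int32_t s = as_signed(u);              // Same width, reinterpreted as signed.
uint32_t round_trip = as_unsigned(s);  // Back to 0x80000000u.
```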
### Other helper and conversion functions

* `ClampCeil<>()` - A convenience function that computes the ceil of its floating-
  point arg, then saturates to the destination type (template parameter,
  defaults to `int`).
* `ClampFloor<>()` - A convenience function that computes the floor of its
  floating-point arg, then saturates to the destination type (template
  parameter, defaults to `int`).
* `IsTypeInRangeForNumericType<>()` - A convenience function that evaluates
  entirely at compile-time and returns true if the destination type (first
  template parameter) can represent the full range of the source type
  (second template parameter).
* `IsValueInRangeForNumericType<>()` - A convenience function that returns
  true if the type supplied as the template parameter can represent the value
  passed as an argument to the function.
* `IsValueNegative()` - A convenience function that will accept any
  arithmetic type as an argument and will return whether the value is less
  than zero. Unsigned types always return false.
* `ClampRound<>()` - A convenience function that rounds its floating-point arg,
  then saturates to the destination type (template parameter, defaults to
  `int`).
* `SafeUnsignedAbs()` - Returns the absolute value of the supplied integer
  parameter as an unsigned result (thus avoiding an overflow if the value
  is the signed, two's complement minimum).
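For example, the last two helpers behave as follows (a minimal sketch based on
the descriptions above):

```cpp
bool a = IsValueNegative(-1);  // true
bool b = IsValueNegative(1u);  // false: unsigned types are never negative.

// std::abs(INT_MIN) overflows; SafeUnsignedAbs() widens to unsigned instead.
uint32_t magnitude = SafeUnsignedAbs(INT_MIN);  // 2147483648u on 32-bit int.
```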
### StrictNumeric<>

`StrictNumeric<>` is a wrapper type that performs assignments and copies via
the `strict_cast` template, and can perform valid arithmetic comparisons
across any range of arithmetic types. `StrictNumeric` is the return type for
values extracted from a `CheckedNumeric` class instance. The raw numeric value
is extracted via `static_cast` to the underlying type or any type with
sufficient range to represent the underlying type.

* `MakeStrictNum()` - Creates a new `StrictNumeric` from the underlying type
  of the supplied arithmetic or StrictNumeric type.
* `SizeT` - Alias for `StrictNumeric<size_t>`.
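As an illustration, the `SizeT` alias can tighten a function signature (a
sketch; `Reserve` and `signed_length` are hypothetical):

```cpp
bool Reserve(SizeT bytes);  // Equivalent to StrictNumeric<size_t>.

size_t n = 4096;
Reserve(n);     // OK: no information can be lost.
// Reserve(-1); // Compile error: int does not cover the size_t range.
Reserve(checked_cast<size_t>(signed_length));  // Convert explicitly instead.
```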
## CheckedNumeric<> in checked_math.h

`CheckedNumeric<>` implements all the logic and operators for detecting integer
boundary conditions such as overflow, underflow, and invalid conversions.
The `CheckedNumeric` type implicitly converts from floating point and integer
data types, and contains overloads for basic arithmetic operations (i.e.: `+`,
`-`, `*`, `/` for all types and `%`, `<<`, `>>`, `&`, `|`, `^` for integers).
However, *the [variadic template functions
](#CheckedNumeric_in-checked_math_h-Non_member-helper-functions)
are the preferred API,* as they remove type ambiguities and help prevent a number
of common errors. The variadic functions can also be more performant, as they
eliminate redundant expressions that are unavoidable with the operator
overloads. (Ideally the compiler should optimize those away, but better to avoid
them in the first place.)

Type promotions are a slightly modified version of the [standard C/C++ numeric
promotions
](http://en.cppreference.com/w/cpp/language/implicit_conversion#Numeric_promotions)
with the two differences being that *there is no default promotion to int*
and *bitwise logical operations always return an unsigned of the wider type.*

### Example

```
#include "src/base/numerics/checked_math.h"
...
CheckedNumeric<uint32_t> variable = 0;
variable++;
variable--;
if (variable.ValueOrDie() == 0)
  // Fine, |variable| still within valid range.

variable--;
variable++;
if (variable.ValueOrDie() == 0)  // Breakpoint or configured CheckHandler
  // Does not happen as variable underflowed.
```
|
||||
|
||||
The unary negation, increment, and decrement operators are supported, along
|
||||
with the following unary arithmetic methods, which return a new
|
||||
`CheckedNumeric` as a result of the operation:
|
||||
|
||||
* `Abs()` - Absolute value.
|
||||
* `UnsignedAbs()` - Absolute value as an equal-width unsigned underlying type
|
||||
(valid for only integral types).
|
||||
* `Max()` - Returns whichever is greater of the current instance or argument.
|
||||
The underlying return type is whichever has the greatest magnitude.
|
||||
* `Min()` - Returns whichever is lowest of the current instance or argument.
|
||||
The underlying return type is whichever has can represent the lowest
|
||||
number in the smallest width (e.g. int8_t over unsigned, int over
|
||||
int8_t, and float over int).
|
||||
|
||||
The following are for converting `CheckedNumeric` instances:
|
||||
|
||||
* `type` - The underlying numeric type.
|
||||
* `AssignIfValid()` - Assigns the underlying value to the supplied
|
||||
destination pointer if the value is currently valid and within the
|
||||
range supported by the destination type. Returns true on success.
|
||||
* `Cast<>()` - Instance method returning a `CheckedNumeric` derived from
|
||||
casting the current instance to a `CheckedNumeric` of the supplied
|
||||
destination type.
|
||||
|
||||
*** aside
|
||||
The following member functions return a `StrictNumeric`, which is valid for
|
||||
comparison and assignment operations, but will trigger a compile failure on
|
||||
attempts to assign to a type of insufficient range. The underlying value can
|
||||
be extracted by an explicit `static_cast` to the underlying type or any type
|
||||
with sufficient range to represent the underlying type.
|
||||
***
|
||||
|
||||
* `IsValid()` - Returns true if the underlying numeric value is valid (i.e.
|
||||
has not wrapped or saturated and is not the result of an invalid
|
||||
conversion).
|
||||
* `ValueOrDie()` - Returns the underlying value. If the state is not valid
|
||||
this call will trigger a crash by default (but may be overridden by
|
||||
supplying an alternate handler to the template).
|
||||
* `ValueOrDefault()` - Returns the current value, or the supplied default if
|
||||
the state is not valid (but will not crash).
|
||||
|
||||
**Comparison operators are explicitly not provided** for `CheckedNumeric`
|
||||
types because they could result in a crash if the type is not in a valid state.
|
||||
Patterns like the following should be used instead:
|
||||
|
||||
```cpp
|
||||
// Either input or padding (or both) may be arbitrary sizes.
|
||||
size_t buff_size;
|
||||
if (!CheckAdd(input, padding, kHeaderLength).AssignIfValid(&buff_size) ||
|
||||
buff_size >= kMaxBuffer) {
|
||||
// Handle an error...
|
||||
} else {
|
||||
// Do stuff on success...
|
||||
}
|
||||
```
|
||||
|
||||
### Non-member helper functions
|
||||
|
||||
The following variadic convenience functions, which accept standard arithmetic
|
||||
or `CheckedNumeric` types, perform arithmetic operations, and return a
|
||||
`CheckedNumeric` result. The supported functions are:
|
||||
|
||||
* `CheckAdd()` - Addition.
|
||||
* `CheckSub()` - Subtraction.
|
||||
* `CheckMul()` - Multiplication.
|
||||
* `CheckDiv()` - Division.
|
||||
* `CheckMod()` - Modulus (integer only).
|
||||
* `CheckLsh()` - Left integer shift (integer only).
|
||||
* `CheckRsh()` - Right integer shift (integer only).
|
||||
* `CheckAnd()` - Bitwise AND (integer only with unsigned result).
|
||||
* `CheckOr()` - Bitwise OR (integer only with unsigned result).
|
||||
* `CheckXor()` - Bitwise XOR (integer only with unsigned result).
|
||||
* `CheckMax()` - Maximum of supplied arguments.
|
||||
* `CheckMin()` - Minimum of supplied arguments.
|
||||
|
||||
The following wrapper functions can be used to avoid the template
|
||||
disambiguator syntax when converting a destination type.
|
||||
|
||||
* `IsValidForType<>()` in place of: `a.template IsValid<>()`
|
||||
* `ValueOrDieForType<>()` in place of: `a.template ValueOrDie<>()`
|
||||
* `ValueOrDefaultForType<>()` in place of: `a.template ValueOrDefault<>()`
|
||||
|
||||
The following general utility methods is are useful for converting from
|
||||
arithmetic types to `CheckedNumeric` types:
|
||||
|
||||
* `MakeCheckedNum()` - Creates a new `CheckedNumeric` from the underlying type
|
||||
of the supplied arithmetic or directly convertible type.
|
||||
|
||||
## ClampedNumeric<> in clamped_math.h
|
||||
|
||||
`ClampedNumeric<>` implements all the logic and operators for clamped
|
||||
(non-sticky saturating) arithmetic operations and conversions. The
|
||||
`ClampedNumeric` type implicitly converts back and forth between floating point
|
||||
and integer data types, saturating on assignment as appropriate. It contains
|
||||
overloads for basic arithmetic operations (i.e.: `+`, `-`, `*`, `/` for
|
||||
all types and `%`, `<<`, `>>`, `&`, `|`, `^` for integers) along with comparison
|
||||
operators for arithmetic types of any size. However, *the [variadic template
|
||||
functions
|
||||
](#ClampedNumeric_in-clamped_math_h-Non_member-helper-functions)
|
||||
are the preferred API,* as they remove type ambiguities and help prevent
|
||||
a number of common errors. The variadic functions can also be more performant,
|
||||
as they eliminate redundant expressions that are unavoidable with the operator
|
||||
overloads. (Ideally the compiler should optimize those away, but better to avoid
|
||||
them in the first place.)
|
||||
|
||||
Type promotions are a slightly modified version of the [standard C/C++ numeric
|
||||
promotions
|
||||
](http://en.cppreference.com/w/cpp/language/implicit_conversion#Numeric_promotions)
|
||||
with the two differences being that *there is no default promotion to int*
|
||||
and *bitwise logical operations always return an unsigned of the wider type.*
|
||||
|
||||
*** aside
|
||||
Most arithmetic operations saturate normally, to the numeric limit in the
|
||||
direction of the sign. The potentially unusual cases are:
|
||||
|
||||
* **Division:** Division by zero returns the saturated limit in the direction
|
||||
of sign of the dividend (first argument). The one exception is 0/0, which
|
||||
returns zero (although logically is NaN).
|
||||
* **Modulus:** Division by zero returns the dividend (first argument).
|
||||
* **Left shift:** Non-zero values saturate in the direction of the signed
|
||||
limit (max/min), even for shifts larger than the bit width. 0 shifted any
|
||||
amount results in 0.
|
||||
* **Right shift:** Negative values saturate to -1. Positive or 0 saturates
|
||||
to 0. (Effectively just an unbounded arithmetic-right-shift.)
|
||||
* **Bitwise operations:** No saturation; bit pattern is identical to
|
||||
non-saturated bitwise operations.
|
||||
***
|
||||
|
||||
### Members
|
||||
|
||||
The unary negation, increment, and decrement operators are supported, along
|
||||
with the following unary arithmetic methods, which return a new
|
||||
`ClampedNumeric` as a result of the operation:
|
||||
|
||||
* `Abs()` - Absolute value.
|
||||
* `UnsignedAbs()` - Absolute value as an equal-width unsigned underlying type
|
||||
(valid for only integral types).
|
||||
* `Max()` - Returns whichever is greater of the current instance or argument.
|
||||
The underlying return type is whichever has the greatest magnitude.
|
||||
* `Min()` - Returns whichever is lowest of the current instance or argument.
|
||||
The underlying return type is whichever has can represent the lowest
|
||||
number in the smallest width (e.g. int8_t over unsigned, int over
|
||||
int8_t, and float over int).
|
||||
|
||||
The following are for converting `ClampedNumeric` instances:
|
||||
|
||||
* `type` - The underlying numeric type.
|
||||
* `RawValue()` - Returns the raw value as the underlying arithmetic type. This
|
||||
is useful when e.g. assigning to an auto type or passing as a deduced
|
||||
template parameter.
|
||||
* `Cast<>()` - Instance method returning a `ClampedNumeric` derived from
|
||||
casting the current instance to a `ClampedNumeric` of the supplied
|
||||
destination type.
|
||||
|
||||
### Non-member helper functions
|
||||
|
||||
The following variadic convenience functions, which accept standard arithmetic
|
||||
or `ClampedNumeric` types, perform arithmetic operations, and return a
|
||||
`ClampedNumeric` result. The supported functions are:
|
||||
|
||||
* `ClampAdd()` - Addition.
|
||||
* `ClampSub()` - Subtraction.
|
||||
* `ClampMul()` - Multiplication.
|
||||
* `ClampDiv()` - Division.
|
||||
* `ClampMod()` - Modulus (integer only).
|
||||
* `ClampLsh()` - Left integer shift (integer only).
|
||||
* `ClampRsh()` - Right integer shift (integer only).
|
||||
* `ClampAnd()` - Bitwise AND (integer only with unsigned result).
|
||||
* `ClampOr()` - Bitwise OR (integer only with unsigned result).
|
||||
* `ClampXor()` - Bitwise XOR (integer only with unsigned result).
|
||||
* `ClampMax()` - Maximum of supplied arguments.
|
||||
* `ClampMin()` - Minimum of supplied arguments.
|
||||
|
||||
The following is a general utility method that is useful for converting
|
||||
to a `ClampedNumeric` type:
|
||||
|
||||
* `MakeClampedNum()` - Creates a new `ClampedNumeric` from the underlying type
|
||||
of the supplied arithmetic or directly convertible type.
|
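To illustrate the division rule above with the variadic helpers (a sketch;
`int_value` is a hypothetical variable):

```cpp
ClampedNumeric<int> q = ClampDiv(42, 0);   // INT_MAX: saturates toward +.
ClampedNumeric<int> p = ClampDiv(-42, 0);  // INT_MIN: saturates toward -.
ClampedNumeric<int> z = ClampDiv(0, 0);    // 0, by the documented exception.

auto clamped = MakeClampedNum(int_value);  // Deduces ClampedNumeric<int>.
```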
30
deps/v8/src/base/numerics/angle_conversions.h
vendored
Normal file
@@ -0,0 +1,30 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_ANGLE_CONVERSIONS_H_
#define V8_BASE_NUMERICS_ANGLE_CONVERSIONS_H_

#include <concepts>
#include <numbers>

namespace v8::base {

template <typename T>
  requires std::floating_point<T>
constexpr T DegToRad(T deg) {
  return deg * std::numbers::pi_v<T> / 180;
}

template <typename T>
  requires std::floating_point<T>
constexpr T RadToDeg(T rad) {
  return rad * 180 / std::numbers::pi_v<T>;
}

}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_ANGLE_CONVERSIONS_H_
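The two helpers above are exact inverses up to floating-point rounding; a
minimal usage sketch (not part of the diff):

```cpp
#include "src/base/numerics/angle_conversions.h"

// 180 degrees is pi radians, within floating-point tolerance.
double rad = v8::base::DegToRad(180.0);  // ~3.141592653589793
double deg = v8::base::RadToDeg(rad);    // ~180.0
```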
162
deps/v8/src/base/numerics/basic_ops_impl.h
vendored
Normal file
@@ -0,0 +1,162 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/390223051): Remove C-library calls to fix the errors.
#pragma allow_unsafe_libc_calls
#endif

#ifndef V8_BASE_NUMERICS_BASIC_OPS_IMPL_H_
#define V8_BASE_NUMERICS_BASIC_OPS_IMPL_H_

#include <array>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <span>
#include <type_traits>

namespace v8::base::internal {

// The correct type to perform math operations on given values of type `T`. This
// may be a larger type than `T` to avoid promotion to `int` which involves sign
// conversion!
template <class T>
  requires(std::is_integral_v<T>)
using MathType = std::conditional_t<
    sizeof(T) >= sizeof(int), T,
    std::conditional_t<std::is_signed_v<T>, int, unsigned int>>;

// Reverses the byte order of the integer.
template <class T>
  requires(std::is_unsigned_v<T> && std::is_integral_v<T>)
inline constexpr T SwapBytes(T value) {
  // MSVC intrinsics are not constexpr, so we provide our own constexpr
  // implementation. We provide it unconditionally so we can test it on all
  // platforms for correctness.
  if (std::is_constant_evaluated()) {
    if constexpr (sizeof(T) == 1u) {
      return value;
    } else if constexpr (sizeof(T) == 2u) {
      MathType<T> a = (MathType<T>(value) >> 0) & MathType<T>{0xff};
      MathType<T> b = (MathType<T>(value) >> 8) & MathType<T>{0xff};
      return static_cast<T>((a << 8) | (b << 0));
    } else if constexpr (sizeof(T) == 4u) {
      T a = (value >> 0) & T{0xff};
      T b = (value >> 8) & T{0xff};
      T c = (value >> 16) & T{0xff};
      T d = (value >> 24) & T{0xff};
      return (a << 24) | (b << 16) | (c << 8) | (d << 0);
    } else {
      static_assert(sizeof(T) == 8u);
      T a = (value >> 0) & T{0xff};
      T b = (value >> 8) & T{0xff};
      T c = (value >> 16) & T{0xff};
      T d = (value >> 24) & T{0xff};
      T e = (value >> 32) & T{0xff};
      T f = (value >> 40) & T{0xff};
      T g = (value >> 48) & T{0xff};
      T h = (value >> 56) & T{0xff};
      return (a << 56) | (b << 48) | (c << 40) | (d << 32) |  //
             (e << 24) | (f << 16) | (g << 8) | (h << 0);
    }
  }

#if _MSC_VER
  if constexpr (sizeof(T) == 1u) {
    return value;
    // NOLINTNEXTLINE(runtime/int)
  } else if constexpr (sizeof(T) == sizeof(unsigned short)) {
    using U = unsigned short;  // NOLINT(runtime/int)
    return _byteswap_ushort(U{value});
    // NOLINTNEXTLINE(runtime/int)
  } else if constexpr (sizeof(T) == sizeof(unsigned long)) {
    using U = unsigned long;  // NOLINT(runtime/int)
    return _byteswap_ulong(U{value});
  } else {
    static_assert(sizeof(T) == 8u);
    return _byteswap_uint64(value);
  }
#else
  if constexpr (sizeof(T) == 1u) {
    return value;
  } else if constexpr (sizeof(T) == 2u) {
    return __builtin_bswap16(uint16_t{value});
  } else if constexpr (sizeof(T) == 4u) {
    return __builtin_bswap32(value);
  } else {
    static_assert(sizeof(T) == 8u);
    return __builtin_bswap64(value);
  }
#endif
}

// Signed values are byte-swapped as unsigned values.
template <class T>
  requires(std::is_signed_v<T> && std::is_integral_v<T>)
inline constexpr T SwapBytes(T value) {
  return static_cast<T>(SwapBytes(static_cast<std::make_unsigned_t<T>>(value)));
}

// Converts from a byte array to an integer.
template <class T>
  requires(std::is_unsigned_v<T> && std::is_integral_v<T>)
inline constexpr T FromLittleEndian(std::span<const uint8_t, sizeof(T)> bytes) {
  T val;
  if (std::is_constant_evaluated()) {
    val = T{0};
    for (size_t i = 0u; i < sizeof(T); i += 1u) {
      // SAFETY: `i < sizeof(T)` (the number of bytes in T), so `(8 * i)` is
      // less than the number of bits in T.
      val |= MathType<T>(bytes[i]) << (8u * i);
    }
  } else {
    // SAFETY: `bytes` has sizeof(T) bytes, and `val` is of type `T` so has
    // sizeof(T) bytes, and the two can not alias as `val` is a stack variable.
    memcpy(&val, bytes.data(), sizeof(T));
  }
  return val;
}

template <class T>
  requires(std::is_signed_v<T> && std::is_integral_v<T>)
inline constexpr T FromLittleEndian(std::span<const uint8_t, sizeof(T)> bytes) {
  return static_cast<T>(FromLittleEndian<std::make_unsigned_t<T>>(bytes));
}

// Converts to a byte array from an integer.
template <class T>
  requires(std::is_unsigned_v<T> && std::is_integral_v<T>)
inline constexpr std::array<uint8_t, sizeof(T)> ToLittleEndian(T val) {
  auto bytes = std::array<uint8_t, sizeof(T)>();
  if (std::is_constant_evaluated()) {
    for (size_t i = 0u; i < sizeof(T); i += 1u) {
      const auto last_byte = static_cast<uint8_t>(val & 0xff);
      // The low bytes go to the front of the array in little endian.
      bytes[i] = last_byte;
      // If `val` is one byte, this shift would be UB. But it's also not needed
      // since the loop will not run again.
      if constexpr (sizeof(T) > 1u) {
        val >>= 8u;
      }
    }
  } else {
    // SAFETY: `bytes` has sizeof(T) bytes, and `val` is of type `T` so has
    // sizeof(T) bytes, and the two can not alias as `val` is a stack variable.
    memcpy(bytes.data(), &val, sizeof(T));
  }
  return bytes;
}

template <class T>
  requires(std::is_signed_v<T> && std::is_integral_v<T>)
inline constexpr std::array<uint8_t, sizeof(T)> ToLittleEndian(T val) {
  return ToLittleEndian(static_cast<std::make_unsigned_t<T>>(val));
}
}  // namespace v8::base::internal

#endif  // V8_BASE_NUMERICS_BASIC_OPS_IMPL_H_
716
deps/v8/src/base/numerics/byte_conversions.h
vendored
Normal file
@ -0,0 +1,716 @@
|
|||
// Copyright 2024 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Slightly adapted for inclusion in V8.
|
||||
// Copyright 2025 the V8 project authors. All rights reserved.
|
||||
|
||||
#ifndef V8_BASE_NUMERICS_BYTE_CONVERSIONS_H_
|
||||
#define V8_BASE_NUMERICS_BYTE_CONVERSIONS_H_
|
||||
|
||||
#include <array>
|
||||
#include <bit>
|
||||
#include <cstdint>
|
||||
#include <cstring>
|
||||
#include <span>
|
||||
#include <type_traits>
|
||||
|
||||
#include "build/build_config.h"
|
||||
#include "src/base/numerics/basic_ops_impl.h"
|
||||
|
||||
// Chromium only builds and runs on Little Endian machines.
|
||||
static_assert(ARCH_CPU_LITTLE_ENDIAN);
|
||||
|
||||
namespace v8::base {
|
||||
|
||||
// Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
|
||||
// TODO(pkasting): Once C++23 is available, replace with std::byteswap.
|
||||
template <class T>
|
||||
requires(std::is_integral_v<T>)
|
||||
inline constexpr T ByteSwap(T value) {
|
||||
return internal::SwapBytes(value);
|
||||
}
|
||||
|
||||
// Returns a uint8_t with the value in `bytes` interpreted as the native endian
|
||||
// encoding of the integer for the machine.
|
||||
//
|
||||
// This is suitable for decoding integers that were always kept in native
|
||||
// encoding, such as when stored in shared-memory (or through IPC) as a byte
// buffer. Prefer an explicit little endian when storing and reading data from
// storage, and explicit big endian for network order.
//
// Note that since a single byte can have only one ordering, this just copies
// the byte out of the span. This provides a consistent function for the
// operation nonetheless.
inline constexpr uint8_t U8FromNativeEndian(
    std::span<const uint8_t, 1u> bytes) {
  return bytes[0];
}
// Returns a uint16_t with the value in `bytes` interpreted as the native endian
// encoding of the integer for the machine.
//
// This is suitable for decoding integers that were always kept in native
// encoding, such as when stored in shared-memory (or through IPC) as a byte
// buffer. Prefer an explicit little endian when storing and reading data from
// storage, and explicit big endian for network order.
inline constexpr uint16_t U16FromNativeEndian(
    std::span<const uint8_t, 2u> bytes) {
  return internal::FromLittleEndian<uint16_t>(bytes);
}
// Returns a uint32_t with the value in `bytes` interpreted as the native endian
// encoding of the integer for the machine.
//
// This is suitable for decoding integers that were always kept in native
// encoding, such as when stored in shared-memory (or through IPC) as a byte
// buffer. Prefer an explicit little endian when storing and reading data from
// storage, and explicit big endian for network order.
inline constexpr uint32_t U32FromNativeEndian(
    std::span<const uint8_t, 4u> bytes) {
  return internal::FromLittleEndian<uint32_t>(bytes);
}
// Returns a uint64_t with the value in `bytes` interpreted as the native endian
// encoding of the integer for the machine.
//
// This is suitable for decoding integers that were always kept in native
// encoding, such as when stored in shared-memory (or through IPC) as a byte
// buffer. Prefer an explicit little endian when storing and reading data from
// storage, and explicit big endian for network order.
inline constexpr uint64_t U64FromNativeEndian(
    std::span<const uint8_t, 8u> bytes) {
  return internal::FromLittleEndian<uint64_t>(bytes);
}
// Returns an int8_t with the value in `bytes` interpreted as the native endian
// encoding of the integer for the machine.
//
// This is suitable for decoding integers that were always kept in native
// encoding, such as when stored in shared-memory (or through IPC) as a byte
// buffer. Prefer an explicit little endian when storing and reading data from
// storage, and explicit big endian for network order.
//
// Note that since a single byte can have only one ordering, this just copies
// the byte out of the span. This provides a consistent function for the
// operation nonetheless.
inline constexpr int8_t I8FromNativeEndian(std::span<const uint8_t, 1u> bytes) {
  return static_cast<int8_t>(bytes[0]);
}
// Returns an int16_t with the value in `bytes` interpreted as the native endian
// encoding of the integer for the machine.
//
// This is suitable for decoding integers that were always kept in native
// encoding, such as when stored in shared-memory (or through IPC) as a byte
// buffer. Prefer an explicit little endian when storing and reading data from
// storage, and explicit big endian for network order.
inline constexpr int16_t I16FromNativeEndian(
    std::span<const uint8_t, 2u> bytes) {
  return internal::FromLittleEndian<int16_t>(bytes);
}
// Returns an int32_t with the value in `bytes` interpreted as the native endian
// encoding of the integer for the machine.
//
// This is suitable for decoding integers that were always kept in native
// encoding, such as when stored in shared-memory (or through IPC) as a byte
// buffer. Prefer an explicit little endian when storing and reading data from
// storage, and explicit big endian for network order.
inline constexpr int32_t I32FromNativeEndian(
    std::span<const uint8_t, 4u> bytes) {
  return internal::FromLittleEndian<int32_t>(bytes);
}
// Returns an int64_t with the value in `bytes` interpreted as the native endian
// encoding of the integer for the machine.
//
// This is suitable for decoding integers that were always kept in native
// encoding, such as when stored in shared-memory (or through IPC) as a byte
// buffer. Prefer an explicit little endian when storing and reading data from
// storage, and explicit big endian for network order.
inline constexpr int64_t I64FromNativeEndian(
    std::span<const uint8_t, 8u> bytes) {
  return internal::FromLittleEndian<int64_t>(bytes);
}

// Returns a float with the value in `bytes` interpreted as the native endian
// encoding of the number for the machine.
//
// This is suitable for decoding numbers that were always kept in native
// encoding, such as when stored in shared-memory (or through IPC) as a byte
// buffer. Prefer an explicit little endian when storing and reading data from
// storage, and explicit big endian for network order.
inline constexpr float FloatFromNativeEndian(
    std::span<const uint8_t, 4u> bytes) {
  return std::bit_cast<float>(U32FromNativeEndian(bytes));
}
// Returns a double with the value in `bytes` interpreted as the native endian
// encoding of the number for the machine.
//
// This is suitable for decoding numbers that were always kept in native
// encoding, such as when stored in shared-memory (or through IPC) as a byte
// buffer. Prefer an explicit little endian when storing and reading data from
// storage, and explicit big endian for network order.
inline constexpr double DoubleFromNativeEndian(
    std::span<const uint8_t, 8u> bytes) {
  return std::bit_cast<double>(U64FromNativeEndian(bytes));
}
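
// Example (an illustrative sketch, not from the upstream header; the buffer
// contents are hypothetical): decoding a value that was written into a
// shared-memory byte buffer in the machine's own byte order.
//
//   constexpr uint8_t shared_buf[] = {0x12u, 0x34u, 0x56u, 0x78u};
//   uint32_t value = U32FromNativeEndian(shared_buf);
//   // On a little-endian machine, value == 0x78'56'34'12u.
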
// Returns a uint8_t with the value in `bytes` interpreted as a little-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in little endian,
// which is a good practice when storing and reading data from storage. Use
// the native-endian versions when working with values that were always in
// memory, such as when stored in shared-memory (or through IPC) as a byte
// buffer.
//
// Note that since a single byte can have only one ordering, this just copies
// the byte out of the span. This provides a consistent function for the
// operation nonetheless.
inline constexpr uint8_t U8FromLittleEndian(
    std::span<const uint8_t, 1u> bytes) {
  return bytes[0];
}
// Returns a uint16_t with the value in `bytes` interpreted as a little-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in little endian,
// which is a good practice when storing and reading data from storage. Use
// the native-endian versions when working with values that were always in
// memory, such as when stored in shared-memory (or through IPC) as a byte
// buffer.
inline constexpr uint16_t U16FromLittleEndian(
    std::span<const uint8_t, 2u> bytes) {
  return internal::FromLittleEndian<uint16_t>(bytes);
}
// Returns a uint32_t with the value in `bytes` interpreted as a little-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in little endian,
// which is a good practice when storing and reading data from storage. Use
// the native-endian versions when working with values that were always in
// memory, such as when stored in shared-memory (or through IPC) as a byte
// buffer.
inline constexpr uint32_t U32FromLittleEndian(
    std::span<const uint8_t, 4u> bytes) {
  return internal::FromLittleEndian<uint32_t>(bytes);
}
// Returns a uint64_t with the value in `bytes` interpreted as a little-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in little endian,
// which is a good practice when storing and reading data from storage. Use
// the native-endian versions when working with values that were always in
// memory, such as when stored in shared-memory (or through IPC) as a byte
// buffer.
inline constexpr uint64_t U64FromLittleEndian(
    std::span<const uint8_t, 8u> bytes) {
  return internal::FromLittleEndian<uint64_t>(bytes);
}
// Returns an int8_t with the value in `bytes` interpreted as a little-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in little endian,
// which is a good practice when storing and reading data from storage. Use
// the native-endian versions when working with values that were always in
// memory, such as when stored in shared-memory (or through IPC) as a byte
// buffer.
//
// Note that since a single byte can have only one ordering, this just copies
// the byte out of the span. This provides a consistent function for the
// operation nonetheless.
inline constexpr int8_t I8FromLittleEndian(std::span<const uint8_t, 1u> bytes) {
  return static_cast<int8_t>(bytes[0]);
}
// Returns an int16_t with the value in `bytes` interpreted as a little-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in little endian,
// which is a good practice when storing and reading data from storage. Use
// the native-endian versions when working with values that were always in
// memory, such as when stored in shared-memory (or through IPC) as a byte
// buffer.
inline constexpr int16_t I16FromLittleEndian(
    std::span<const uint8_t, 2u> bytes) {
  return internal::FromLittleEndian<int16_t>(bytes);
}
// Returns an int32_t with the value in `bytes` interpreted as a little-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in little endian,
// which is a good practice when storing and reading data from storage. Use
// the native-endian versions when working with values that were always in
// memory, such as when stored in shared-memory (or through IPC) as a byte
// buffer.
inline constexpr int32_t I32FromLittleEndian(
    std::span<const uint8_t, 4u> bytes) {
  return internal::FromLittleEndian<int32_t>(bytes);
}
// Returns an int64_t with the value in `bytes` interpreted as a little-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in little endian,
// which is a good practice when storing and reading data from storage. Use
// the native-endian versions when working with values that were always in
// memory, such as when stored in shared-memory (or through IPC) as a byte
// buffer.
inline constexpr int64_t I64FromLittleEndian(
    std::span<const uint8_t, 8u> bytes) {
  return internal::FromLittleEndian<int64_t>(bytes);
}
// Returns a float with the value in `bytes` interpreted as a little-endian
// encoding of the number.
//
// This is suitable for decoding numbers encoded explicitly in little endian,
// which is a good practice when storing and reading data from storage. Use
// the native-endian versions when working with values that were always in
// memory, such as when stored in shared-memory (or through IPC) as a byte
// buffer.
inline constexpr float FloatFromLittleEndian(
    std::span<const uint8_t, 4u> bytes) {
  return std::bit_cast<float>(U32FromLittleEndian(bytes));
}
// Returns a double with the value in `bytes` interpreted as a little-endian
// encoding of the number.
//
// This is suitable for decoding numbers encoded explicitly in little endian,
// which is a good practice when storing and reading data from storage. Use
// the native-endian versions when working with values that were always in
// memory, such as when stored in shared-memory (or through IPC) as a byte
// buffer.
inline constexpr double DoubleFromLittleEndian(
    std::span<const uint8_t, 8u> bytes) {
  return std::bit_cast<double>(U64FromLittleEndian(bytes));
}
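
// Example (an illustrative sketch, not from the upstream header; the stored
// bytes are hypothetical): a field persisted in a defined little-endian format
// decodes to the same value on every machine.
//
//   constexpr uint8_t stored[] = {0x78u, 0x56u, 0x34u, 0x12u};
//   uint32_t value = U32FromLittleEndian(stored);  // value == 0x12'34'56'78u
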
// Returns a uint8_t with the value in `bytes` interpreted as a big-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in big endian, such
// as for network order. Use the native-endian versions when working with values
// that were always in memory, such as when stored in shared-memory (or through
// IPC) as a byte buffer.
//
// Note that since a single byte can have only one ordering, this just copies
// the byte out of the span. This provides a consistent function for the
// operation nonetheless.
inline constexpr uint8_t U8FromBigEndian(std::span<const uint8_t, 1u> bytes) {
  return bytes[0];
}
// Returns a uint16_t with the value in `bytes` interpreted as a big-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in big endian, such
// as for network order. Use the native-endian versions when working with values
// that were always in memory, such as when stored in shared-memory (or through
// IPC) as a byte buffer.
inline constexpr uint16_t U16FromBigEndian(std::span<const uint8_t, 2u> bytes) {
  return ByteSwap(internal::FromLittleEndian<uint16_t>(bytes));
}
// Returns a uint32_t with the value in `bytes` interpreted as a big-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in big endian, such
// as for network order. Use the native-endian versions when working with values
// that were always in memory, such as when stored in shared-memory (or through
// IPC) as a byte buffer.
inline constexpr uint32_t U32FromBigEndian(std::span<const uint8_t, 4u> bytes) {
  return ByteSwap(internal::FromLittleEndian<uint32_t>(bytes));
}
// Returns a uint64_t with the value in `bytes` interpreted as a big-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in big endian, such
// as for network order. Use the native-endian versions when working with values
// that were always in memory, such as when stored in shared-memory (or through
// IPC) as a byte buffer.
inline constexpr uint64_t U64FromBigEndian(std::span<const uint8_t, 8u> bytes) {
  return ByteSwap(internal::FromLittleEndian<uint64_t>(bytes));
}
// Returns an int8_t with the value in `bytes` interpreted as a big-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in big endian, such
// as for network order. Use the native-endian versions when working with values
// that were always in memory, such as when stored in shared-memory (or through
// IPC) as a byte buffer.
//
// Note that since a single byte can have only one ordering, this just copies
// the byte out of the span. This provides a consistent function for the
// operation nonetheless.
inline constexpr int8_t I8FromBigEndian(std::span<const uint8_t, 1u> bytes) {
  return static_cast<int8_t>(bytes[0]);
}
// Returns an int16_t with the value in `bytes` interpreted as a big-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in big endian, such
// as for network order. Use the native-endian versions when working with values
// that were always in memory, such as when stored in shared-memory (or through
// IPC) as a byte buffer.
inline constexpr int16_t I16FromBigEndian(std::span<const uint8_t, 2u> bytes) {
  return ByteSwap(internal::FromLittleEndian<int16_t>(bytes));
}
// Returns an int32_t with the value in `bytes` interpreted as a big-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in big endian, such
// as for network order. Use the native-endian versions when working with values
// that were always in memory, such as when stored in shared-memory (or through
// IPC) as a byte buffer.
inline constexpr int32_t I32FromBigEndian(std::span<const uint8_t, 4u> bytes) {
  return ByteSwap(internal::FromLittleEndian<int32_t>(bytes));
}
// Returns an int64_t with the value in `bytes` interpreted as a big-endian
// encoding of the integer.
//
// This is suitable for decoding integers encoded explicitly in big endian, such
// as for network order. Use the native-endian versions when working with values
// that were always in memory, such as when stored in shared-memory (or through
// IPC) as a byte buffer.
inline constexpr int64_t I64FromBigEndian(std::span<const uint8_t, 8u> bytes) {
  return ByteSwap(internal::FromLittleEndian<int64_t>(bytes));
}
// Returns a float with the value in `bytes` interpreted as a big-endian
// encoding of the number.
//
// This is suitable for decoding numbers encoded explicitly in big endian, such
// as for network order. Use the native-endian versions when working with values
// that were always in memory, such as when stored in shared-memory (or through
// IPC) as a byte buffer.
inline constexpr float FloatFromBigEndian(std::span<const uint8_t, 4u> bytes) {
  return std::bit_cast<float>(U32FromBigEndian(bytes));
}
// Returns a double with the value in `bytes` interpreted as a big-endian
// encoding of the number.
//
// This is suitable for decoding numbers encoded explicitly in big endian, such
// as for network order. Use the native-endian versions when working with values
// that were always in memory, such as when stored in shared-memory (or through
// IPC) as a byte buffer.
inline constexpr double DoubleFromBigEndian(
    std::span<const uint8_t, 8u> bytes) {
  return std::bit_cast<double>(U64FromBigEndian(bytes));
}
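
// Example (an illustrative sketch, not from the upstream header; the packet
// bytes are hypothetical): a 16-bit port number arriving in network order
// (big endian) decodes with the most significant byte first.
//
//   constexpr uint8_t wire[] = {0x1Fu, 0x90u};
//   uint16_t port = U16FromBigEndian(wire);  // port == 0x1F'90u, i.e. 8080
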
// Returns a byte array holding the value of a uint8_t encoded as the native
// endian encoding of the integer for the machine.
//
// This is suitable for encoding integers that will always be kept in native
// encoding, such as for storing in shared-memory (or sending through IPC) as a
// byte buffer. Prefer an explicit little endian when storing data into external
// storage, and explicit big endian for network order.
inline constexpr std::array<uint8_t, 1u> U8ToNativeEndian(uint8_t val) {
  return {val};
}
// Returns a byte array holding the value of a uint16_t encoded as the native
// endian encoding of the integer for the machine.
//
// This is suitable for encoding integers that will always be kept in native
// encoding, such as for storing in shared-memory (or sending through IPC) as a
// byte buffer. Prefer an explicit little endian when storing data into external
// storage, and explicit big endian for network order.
inline constexpr std::array<uint8_t, 2u> U16ToNativeEndian(uint16_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of a uint32_t encoded as the native
// endian encoding of the integer for the machine.
//
// This is suitable for encoding integers that will always be kept in native
// encoding, such as for storing in shared-memory (or sending through IPC) as a
// byte buffer. Prefer an explicit little endian when storing data into external
// storage, and explicit big endian for network order.
inline constexpr std::array<uint8_t, 4u> U32ToNativeEndian(uint32_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of a uint64_t encoded as the native
// endian encoding of the integer for the machine.
//
// This is suitable for encoding integers that will always be kept in native
// encoding, such as for storing in shared-memory (or sending through IPC) as a
// byte buffer. Prefer an explicit little endian when storing data into external
// storage, and explicit big endian for network order.
inline constexpr std::array<uint8_t, 8u> U64ToNativeEndian(uint64_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of an int8_t encoded as the native
// endian encoding of the integer for the machine.
//
// This is suitable for encoding integers that will always be kept in native
// encoding, such as for storing in shared-memory (or sending through IPC) as a
// byte buffer. Prefer an explicit little endian when storing data into external
// storage, and explicit big endian for network order.
inline constexpr std::array<uint8_t, 1u> I8ToNativeEndian(int8_t val) {
  return {static_cast<uint8_t>(val)};
}
// Returns a byte array holding the value of an int16_t encoded as the native
// endian encoding of the integer for the machine.
//
// This is suitable for encoding integers that will always be kept in native
// encoding, such as for storing in shared-memory (or sending through IPC) as a
// byte buffer. Prefer an explicit little endian when storing data into external
// storage, and explicit big endian for network order.
inline constexpr std::array<uint8_t, 2u> I16ToNativeEndian(int16_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of an int32_t encoded as the native
// endian encoding of the integer for the machine.
//
// This is suitable for encoding integers that will always be kept in native
// encoding, such as for storing in shared-memory (or sending through IPC) as a
// byte buffer. Prefer an explicit little endian when storing data into external
// storage, and explicit big endian for network order.
inline constexpr std::array<uint8_t, 4u> I32ToNativeEndian(int32_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of an int64_t encoded as the native
// endian encoding of the integer for the machine.
//
// This is suitable for encoding integers that will always be kept in native
// encoding, such as for storing in shared-memory (or sending through IPC) as a
// byte buffer. Prefer an explicit little endian when storing data into external
// storage, and explicit big endian for network order.
inline constexpr std::array<uint8_t, 8u> I64ToNativeEndian(int64_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of a float encoded as the native
// endian encoding of the number for the machine.
//
// This is suitable for encoding numbers that will always be kept in native
// encoding, such as for storing in shared-memory (or sending through IPC) as a
// byte buffer. Prefer an explicit little endian when storing data into external
// storage, and explicit big endian for network order.
inline constexpr std::array<uint8_t, 4u> FloatToNativeEndian(float val) {
  return U32ToNativeEndian(std::bit_cast<uint32_t>(val));
}
// Returns a byte array holding the value of a double encoded as the native
// endian encoding of the number for the machine.
//
// This is suitable for encoding numbers that will always be kept in native
// encoding, such as for storing in shared-memory (or sending through IPC) as a
// byte buffer. Prefer an explicit little endian when storing data into external
// storage, and explicit big endian for network order.
inline constexpr std::array<uint8_t, 8u> DoubleToNativeEndian(double val) {
  return U64ToNativeEndian(std::bit_cast<uint64_t>(val));
}
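
// Example (an illustrative sketch, not from the upstream header): encoding
// returns a std::array that can be copied into a shared-memory or IPC byte
// buffer; the matching *FromNativeEndian function round-trips the value.
//
//   std::array<uint8_t, 4u> out = U32ToNativeEndian(0x78'56'34'12u);
//   // On a little-endian machine, out == {0x12u, 0x34u, 0x56u, 0x78u}.
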
// Returns a byte array holding the value of a uint8_t encoded as the
// little-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in little endian, which is
// a good practice when storing and reading data from storage. Use the
// native-endian versions when working with values that will always be in
// memory, such as when stored in shared-memory (or passed through IPC) as a
// byte buffer.
inline constexpr std::array<uint8_t, 1u> U8ToLittleEndian(uint8_t val) {
  return {val};
}
// Returns a byte array holding the value of a uint16_t encoded as the
// little-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in little endian, which is
// a good practice when storing and reading data from storage. Use the
// native-endian versions when working with values that will always be in
// memory, such as when stored in shared-memory (or passed through IPC) as a
// byte buffer.
inline constexpr std::array<uint8_t, 2u> U16ToLittleEndian(uint16_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of a uint32_t encoded as the
// little-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in little endian, which is
// a good practice when storing and reading data from storage. Use the
// native-endian versions when working with values that will always be in
// memory, such as when stored in shared-memory (or passed through IPC) as a
// byte buffer.
inline constexpr std::array<uint8_t, 4u> U32ToLittleEndian(uint32_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of a uint64_t encoded as the
// little-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in little endian, which is
// a good practice when storing and reading data from storage. Use the
// native-endian versions when working with values that will always be in
// memory, such as when stored in shared-memory (or passed through IPC) as a
// byte buffer.
inline constexpr std::array<uint8_t, 8u> U64ToLittleEndian(uint64_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of an int8_t encoded as the
// little-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in little endian, which is
// a good practice when storing and reading data from storage. Use the
// native-endian versions when working with values that will always be in
// memory, such as when stored in shared-memory (or passed through IPC) as a
// byte buffer.
inline constexpr std::array<uint8_t, 1u> I8ToLittleEndian(int8_t val) {
  return {static_cast<uint8_t>(val)};
}
// Returns a byte array holding the value of an int16_t encoded as the
// little-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in little endian, which is
// a good practice when storing and reading data from storage. Use the
// native-endian versions when working with values that will always be in
// memory, such as when stored in shared-memory (or passed through IPC) as a
// byte buffer.
inline constexpr std::array<uint8_t, 2u> I16ToLittleEndian(int16_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of an int32_t encoded as the
// little-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in little endian, which is
// a good practice when storing and reading data from storage. Use the
// native-endian versions when working with values that will always be in
// memory, such as when stored in shared-memory (or passed through IPC) as a
// byte buffer.
inline constexpr std::array<uint8_t, 4u> I32ToLittleEndian(int32_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of an int64_t encoded as the
// little-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in little endian, which is
// a good practice when storing and reading data from storage. Use the
// native-endian versions when working with values that will always be in
// memory, such as when stored in shared-memory (or passed through IPC) as a
// byte buffer.
inline constexpr std::array<uint8_t, 8u> I64ToLittleEndian(int64_t val) {
  return internal::ToLittleEndian(val);
}
// Returns a byte array holding the value of a float encoded as the
// little-endian encoding of the number.
//
// This is suitable for encoding numbers explicitly in little endian, which is
// a good practice when storing and reading data from storage. Use the
// native-endian versions when working with values that will always be in
// memory, such as when stored in shared-memory (or passed through IPC) as a
// byte buffer.
inline constexpr std::array<uint8_t, 4u> FloatToLittleEndian(float val) {
  return internal::ToLittleEndian(std::bit_cast<uint32_t>(val));
}
// Returns a byte array holding the value of a double encoded as the
// little-endian encoding of the number.
//
// This is suitable for encoding numbers explicitly in little endian, which is
// a good practice when storing and reading data from storage. Use the
// native-endian versions when working with values that will always be in
// memory, such as when stored in shared-memory (or passed through IPC) as a
// byte buffer.
inline constexpr std::array<uint8_t, 8u> DoubleToLittleEndian(double val) {
  return internal::ToLittleEndian(std::bit_cast<uint64_t>(val));
}
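
// Example (an illustrative sketch, not from the upstream header): a float
// serialized explicitly as little endian has the same byte layout no matter
// which machine wrote it.
//
//   std::array<uint8_t, 4u> out = FloatToLittleEndian(1.0f);
//   // 1.0f is 0x3F'80'00'00, so out == {0x00u, 0x00u, 0x80u, 0x3Fu}.
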
// Returns a byte array holding the value of a uint8_t encoded as the big-endian
// encoding of the integer.
//
// This is suitable for encoding integers explicitly in big endian, such as for
// network order. Use the native-endian versions when working with values that
// are always in memory, such as when stored in shared-memory (or passed through
// IPC) as a byte buffer. Use the little-endian encoding for storing and reading
// from storage.
inline constexpr std::array<uint8_t, 1u> U8ToBigEndian(uint8_t val) {
  return {val};
}
// Returns a byte array holding the value of a uint16_t encoded as the
// big-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in big endian, such as for
// network order. Use the native-endian versions when working with values that
// are always in memory, such as when stored in shared-memory (or passed through
// IPC) as a byte buffer. Use the little-endian encoding for storing and reading
// from storage.
inline constexpr std::array<uint8_t, 2u> U16ToBigEndian(uint16_t val) {
  return internal::ToLittleEndian(ByteSwap(val));
}
// Returns a byte array holding the value of a uint32_t encoded as the
// big-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in big endian, such as for
// network order. Use the native-endian versions when working with values that
// are always in memory, such as when stored in shared-memory (or passed through
// IPC) as a byte buffer. Use the little-endian encoding for storing and reading
// from storage.
inline constexpr std::array<uint8_t, 4u> U32ToBigEndian(uint32_t val) {
  return internal::ToLittleEndian(ByteSwap(val));
}
// Returns a byte array holding the value of a uint64_t encoded as the
// big-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in big endian, such as for
// network order. Use the native-endian versions when working with values that
// are always in memory, such as when stored in shared-memory (or passed through
// IPC) as a byte buffer. Use the little-endian encoding for storing and reading
// from storage.
inline constexpr std::array<uint8_t, 8u> U64ToBigEndian(uint64_t val) {
  return internal::ToLittleEndian(ByteSwap(val));
}
// Returns a byte array holding the value of an int8_t encoded as the big-endian
// encoding of the integer.
//
// This is suitable for encoding integers explicitly in big endian, such as for
// network order. Use the native-endian versions when working with values that
// are always in memory, such as when stored in shared-memory (or passed through
// IPC) as a byte buffer. Use the little-endian encoding for storing and reading
// from storage.
inline constexpr std::array<uint8_t, 1u> I8ToBigEndian(int8_t val) {
  return {static_cast<uint8_t>(val)};
}
// Returns a byte array holding the value of an int16_t encoded as the
// big-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in big endian, such as for
// network order. Use the native-endian versions when working with values that
// are always in memory, such as when stored in shared-memory (or passed through
// IPC) as a byte buffer. Use the little-endian encoding for storing and reading
// from storage.
inline constexpr std::array<uint8_t, 2u> I16ToBigEndian(int16_t val) {
  return internal::ToLittleEndian(ByteSwap(val));
}
// Returns a byte array holding the value of an int32_t encoded as the
// big-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in big endian, such as for
// network order. Use the native-endian versions when working with values that
// are always in memory, such as when stored in shared-memory (or passed through
// IPC) as a byte buffer. Use the little-endian encoding for storing and reading
// from storage.
inline constexpr std::array<uint8_t, 4u> I32ToBigEndian(int32_t val) {
  return internal::ToLittleEndian(ByteSwap(val));
}
// Returns a byte array holding the value of an int64_t encoded as the
// big-endian encoding of the integer.
//
// This is suitable for encoding integers explicitly in big endian, such as for
// network order. Use the native-endian versions when working with values that
// are always in memory, such as when stored in shared-memory (or passed through
// IPC) as a byte buffer. Use the little-endian encoding for storing and reading
// from storage.
inline constexpr std::array<uint8_t, 8u> I64ToBigEndian(int64_t val) {
  return internal::ToLittleEndian(ByteSwap(val));
}
// Returns a byte array holding the value of a float encoded as the big-endian
// encoding of the number.
//
// This is suitable for encoding numbers explicitly in big endian, such as for
// network order. Use the native-endian versions when working with values that
// are always in memory, such as when stored in shared-memory (or passed through
// IPC) as a byte buffer. Use the little-endian encoding for storing and reading
// from storage.
inline constexpr std::array<uint8_t, 4u> FloatToBigEndian(float val) {
  return internal::ToLittleEndian(ByteSwap(std::bit_cast<uint32_t>(val)));
}
// Returns a byte array holding the value of a double encoded as the big-endian
// encoding of the number.
//
// This is suitable for encoding numbers explicitly in big endian, such as for
// network order. Use the native-endian versions when working with values that
// are always in memory, such as when stored in shared-memory (or passed through
// IPC) as a byte buffer. Use the little-endian encoding for storing and reading
// from storage.
inline constexpr std::array<uint8_t, 8u> DoubleToBigEndian(double val) {
  return internal::ToLittleEndian(ByteSwap(std::bit_cast<uint64_t>(val)));
}
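
// Example (an illustrative sketch, not from the upstream header): writing a
// 64-bit value in network order; U64FromBigEndian recovers it on the
// receiving side.
//
//   std::array<uint8_t, 8u> wire = U64ToBigEndian(0x12'34'56'78'90'12'34'56u);
//   // wire == {0x12u, 0x34u, 0x56u, 0x78u, 0x90u, 0x12u, 0x34u, 0x56u}
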
}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_BYTE_CONVERSIONS_H_

553 deps/v8/src/base/numerics/byte_conversions_unittest.cc vendored Normal file

@@ -0,0 +1,553 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#include "src/base/numerics/byte_conversions.h"

#include <array>
#include <bit>
#include <concepts>
#include <cstdint>

#include "testing/gtest/include/gtest/gtest.h"

namespace v8::base::numerics {

TEST(NumericsTest, FromNativeEndian) {
  // The implementation of FromNativeEndian and FromLittleEndian assumes the
  // native endian is little. If support of big endian is desired, compile-time
  // branches will need to be added to the implementation, and the test results
  // will differ there (they would match FromBigEndian in this test).
  static_assert(std::endian::native == std::endian::little);
  {
    constexpr uint8_t bytes[] = {0x12u};
    EXPECT_EQ(U8FromNativeEndian(bytes), 0x12u);
    static_assert(std::same_as<uint8_t, decltype(U8FromNativeEndian(bytes))>);
    static_assert(U8FromNativeEndian(bytes) == 0x12u);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u};
    EXPECT_EQ(U16FromNativeEndian(bytes), 0x34'12u);
    static_assert(std::same_as<uint16_t, decltype(U16FromNativeEndian(bytes))>);
    static_assert(U16FromNativeEndian(bytes) == 0x34'12u);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u};
    EXPECT_EQ(U32FromNativeEndian(bytes), 0x78'56'34'12u);
    static_assert(std::same_as<uint32_t, decltype(U32FromNativeEndian(bytes))>);
    static_assert(U32FromNativeEndian(bytes) == 0x78'56'34'12u);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u,
                                 0x90u, 0x12u, 0x34u, 0x56u};
    EXPECT_EQ(U64FromNativeEndian(bytes), 0x56'34'12'90'78'56'34'12u);
    static_assert(std::same_as<uint64_t, decltype(U64FromNativeEndian(bytes))>);
    static_assert(U64FromNativeEndian(bytes) == 0x56'34'12'90'78'56'34'12u);
  }

  {
    constexpr uint8_t bytes[] = {0x12u};
    EXPECT_EQ(I8FromNativeEndian(bytes), 0x12);
    static_assert(std::same_as<int8_t, decltype(I8FromNativeEndian(bytes))>);
    static_assert(I8FromNativeEndian(bytes) == 0x12);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u};
    EXPECT_EQ(I16FromNativeEndian(bytes), 0x34'12);
    static_assert(std::same_as<int16_t, decltype(I16FromNativeEndian(bytes))>);
    static_assert(I16FromNativeEndian(bytes) == 0x34'12);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u};
    EXPECT_EQ(I32FromNativeEndian(bytes), 0x78'56'34'12);
    static_assert(std::same_as<int32_t, decltype(I32FromNativeEndian(bytes))>);
    static_assert(I32FromNativeEndian(bytes) == 0x78'56'34'12);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u,
                                 0x90u, 0x12u, 0x34u, 0x56u};
    EXPECT_EQ(I64FromNativeEndian(bytes), 0x56'34'12'90'78'56'34'12);
    static_assert(std::same_as<int64_t, decltype(I64FromNativeEndian(bytes))>);
    static_assert(I64FromNativeEndian(bytes) == 0x56'34'12'90'78'56'34'12);
  }

  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u};
    EXPECT_EQ(FloatFromNativeEndian(bytes), 1.73782443614e+34f);
    EXPECT_EQ(std::bit_cast<uint32_t>(FloatFromNativeEndian(bytes)),
              0x78'56'34'12u);
    static_assert(std::same_as<float, decltype(FloatFromNativeEndian(bytes))>);
    static_assert(FloatFromNativeEndian(bytes) == 1.73782443614e+34f);
    static_assert(std::bit_cast<uint32_t>(FloatFromNativeEndian(bytes)) ==
                  0x78'56'34'12u);
  }

  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u,
                                 0x90u, 0x12u, 0x34u, 0x56u};
    EXPECT_EQ(DoubleFromNativeEndian(bytes),
              1.84145159269283616391989849435e107);
    EXPECT_EQ(std::bit_cast<uint64_t>(DoubleFromNativeEndian(bytes)),
              0x56'34'12'90'78'56'34'12u);
    static_assert(
        std::same_as<double, decltype(DoubleFromNativeEndian(bytes))>);
    static_assert(DoubleFromNativeEndian(bytes) ==
                  1.84145159269283616391989849435e107);
    static_assert(std::bit_cast<uint64_t>(DoubleFromNativeEndian(bytes)) ==
                  0x56'34'12'90'78'56'34'12u);
  }
}
TEST(NumericsTest, FromLittleEndian) {
  // The implementation of FromNativeEndian and FromLittleEndian assumes the
  // native endian is little. If support of big endian is desired, compile-time
  // branches will need to be added to the implementation, and the test results
  // will differ there (they would match FromBigEndian in this test).
  static_assert(std::endian::native == std::endian::little);
  {
    constexpr uint8_t bytes[] = {0x12u};
    EXPECT_EQ(U8FromLittleEndian(bytes), 0x12u);
    static_assert(std::same_as<uint8_t, decltype(U8FromLittleEndian(bytes))>);
    static_assert(U8FromLittleEndian(bytes) == 0x12u);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u};
    EXPECT_EQ(U16FromLittleEndian(bytes), 0x34'12u);
    static_assert(std::same_as<uint16_t, decltype(U16FromLittleEndian(bytes))>);
    static_assert(U16FromLittleEndian(bytes) == 0x34'12u);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u};
    EXPECT_EQ(U32FromLittleEndian(bytes), 0x78'56'34'12u);
    static_assert(std::same_as<uint32_t, decltype(U32FromLittleEndian(bytes))>);
    static_assert(U32FromLittleEndian(bytes) == 0x78'56'34'12u);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u,
                                 0x90u, 0x12u, 0x34u, 0x56u};
    EXPECT_EQ(U64FromLittleEndian(bytes), 0x56'34'12'90'78'56'34'12u);
    static_assert(std::same_as<uint64_t, decltype(U64FromLittleEndian(bytes))>);
    static_assert(U64FromLittleEndian(bytes) == 0x56'34'12'90'78'56'34'12u);
  }

  {
    constexpr uint8_t bytes[] = {0x12u};
    EXPECT_EQ(I8FromLittleEndian(bytes), 0x12);
    static_assert(std::same_as<int8_t, decltype(I8FromLittleEndian(bytes))>);
    static_assert(I8FromLittleEndian(bytes) == 0x12);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u};
    EXPECT_EQ(I16FromLittleEndian(bytes), 0x34'12);
    static_assert(std::same_as<int16_t, decltype(I16FromLittleEndian(bytes))>);
    static_assert(I16FromLittleEndian(bytes) == 0x34'12);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u};
    EXPECT_EQ(I32FromLittleEndian(bytes), 0x78'56'34'12);
    static_assert(std::same_as<int32_t, decltype(I32FromLittleEndian(bytes))>);
    static_assert(I32FromLittleEndian(bytes) == 0x78'56'34'12);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u,
                                 0x90u, 0x12u, 0x34u, 0x56u};
    EXPECT_EQ(I64FromLittleEndian(bytes), 0x56'34'12'90'78'56'34'12);
    static_assert(std::same_as<int64_t, decltype(I64FromLittleEndian(bytes))>);
    static_assert(I64FromLittleEndian(bytes) == 0x56'34'12'90'78'56'34'12);
  }

  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u};
    EXPECT_EQ(FloatFromLittleEndian(bytes), 1.73782443614e+34f);
    EXPECT_EQ(std::bit_cast<uint32_t>(FloatFromLittleEndian(bytes)),
              0x78'56'34'12u);
    static_assert(std::same_as<float, decltype(FloatFromLittleEndian(bytes))>);
    static_assert(FloatFromLittleEndian(bytes) == 1.73782443614e+34f);
    static_assert(std::bit_cast<uint32_t>(FloatFromLittleEndian(bytes)) ==
                  0x78'56'34'12u);
  }

  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u,
                                 0x90u, 0x12u, 0x34u, 0x56u};
    EXPECT_EQ(DoubleFromLittleEndian(bytes),
              1.84145159269283616391989849435e107);
    EXPECT_EQ(std::bit_cast<uint64_t>(DoubleFromLittleEndian(bytes)),
              0x56'34'12'90'78'56'34'12u);
    static_assert(
        std::same_as<double, decltype(DoubleFromLittleEndian(bytes))>);
    static_assert(DoubleFromLittleEndian(bytes) ==
                  1.84145159269283616391989849435e107);
    static_assert(std::bit_cast<uint64_t>(DoubleFromLittleEndian(bytes)) ==
                  0x56'34'12'90'78'56'34'12u);
  }
}
TEST(NumericsTest, FromBigEndian) {
  // The implementation of FromNativeEndian and FromLittleEndian assumes the
  // native endian is little. If support of big endian is desired, compile-time
  // branches will need to be added to the implementation, and the test results
  // will differ there (they would match FromLittleEndian in this test).
  static_assert(std::endian::native == std::endian::little);
  {
    constexpr uint8_t bytes[] = {0x12u};
    EXPECT_EQ(U8FromBigEndian(bytes), 0x12u);
    static_assert(U8FromBigEndian(bytes) == 0x12u);
    static_assert(std::same_as<uint8_t, decltype(U8FromBigEndian(bytes))>);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u};
    EXPECT_EQ(U16FromBigEndian(bytes), 0x12'34u);
    static_assert(U16FromBigEndian(bytes) == 0x12'34u);
    static_assert(std::same_as<uint16_t, decltype(U16FromBigEndian(bytes))>);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u};
    EXPECT_EQ(U32FromBigEndian(bytes), 0x12'34'56'78u);
    static_assert(U32FromBigEndian(bytes) == 0x12'34'56'78u);
    static_assert(std::same_as<uint32_t, decltype(U32FromBigEndian(bytes))>);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u,
                                 0x90u, 0x12u, 0x34u, 0x56u};
    EXPECT_EQ(U64FromBigEndian(bytes), 0x12'34'56'78'90'12'34'56u);
    static_assert(U64FromBigEndian(bytes) == 0x12'34'56'78'90'12'34'56u);
    static_assert(std::same_as<uint64_t, decltype(U64FromBigEndian(bytes))>);
  }

  {
    constexpr uint8_t bytes[] = {0x12u};
    EXPECT_EQ(I8FromBigEndian(bytes), 0x12);
    static_assert(I8FromBigEndian(bytes) == 0x12);
    static_assert(std::same_as<int8_t, decltype(I8FromBigEndian(bytes))>);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u};
    EXPECT_EQ(I16FromBigEndian(bytes), 0x12'34);
    static_assert(I16FromBigEndian(bytes) == 0x12'34);
    static_assert(std::same_as<int16_t, decltype(I16FromBigEndian(bytes))>);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u};
    EXPECT_EQ(I32FromBigEndian(bytes), 0x12'34'56'78);
    static_assert(I32FromBigEndian(bytes) == 0x12'34'56'78);
    static_assert(std::same_as<int32_t, decltype(I32FromBigEndian(bytes))>);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u,
                                 0x90u, 0x12u, 0x34u, 0x56u};
    EXPECT_EQ(I64FromBigEndian(bytes), 0x12'34'56'78'90'12'34'56);
    static_assert(I64FromBigEndian(bytes) == 0x12'34'56'78'90'12'34'56);
    static_assert(std::same_as<int64_t, decltype(I64FromBigEndian(bytes))>);
  }

  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u};
    EXPECT_EQ(FloatFromBigEndian(bytes), 5.6904566139e-28f);
    EXPECT_EQ(std::bit_cast<uint32_t>(FloatFromBigEndian(bytes)),
              0x12'34'56'78u);
    static_assert(std::same_as<float, decltype(FloatFromBigEndian(bytes))>);
    static_assert(FloatFromBigEndian(bytes) == 5.6904566139e-28f);
    static_assert(std::bit_cast<uint32_t>(FloatFromBigEndian(bytes)) ==
                  0x12'34'56'78u);
  }
  {
    constexpr uint8_t bytes[] = {0x12u, 0x34u, 0x56u, 0x78u,
                                 0x90u, 0x12u, 0x34u, 0x56u};
    EXPECT_EQ(DoubleFromBigEndian(bytes), 5.62634909901491201382066931077e-221);
    EXPECT_EQ(std::bit_cast<uint64_t>(DoubleFromBigEndian(bytes)),
              0x12'34'56'78'90'12'34'56u);
    static_assert(std::same_as<double, decltype(DoubleFromBigEndian(bytes))>);
    static_assert(DoubleFromBigEndian(bytes) ==
                  5.62634909901491201382066931077e-221);
    static_assert(std::bit_cast<uint64_t>(DoubleFromBigEndian(bytes)) ==
                  0x12'34'56'78'90'12'34'56u);
  }
}
TEST(NumericsTest, ToNativeEndian) {
  // The implementation of ToNativeEndian and ToLittleEndian assumes the native
  // endian is little. If support of big endian is desired, compile-time
  // branches will need to be added to the implementation, and the test results
  // will differ there (they would match ToBigEndian in this test).
  static_assert(std::endian::native == std::endian::little);
  {
    constexpr std::array<uint8_t, 1u> bytes = {0x12u};
    constexpr auto val = uint8_t{0x12u};
    EXPECT_EQ(U8ToNativeEndian(val), bytes);
    static_assert(
        std::same_as<std::array<uint8_t, 1u>, decltype(U8ToNativeEndian(val))>);
    static_assert(U8ToNativeEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 2u> bytes = {0x12u, 0x34u};
    constexpr auto val = uint16_t{0x34'12u};
    EXPECT_EQ(U16ToNativeEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 2u>,
                               decltype(U16ToNativeEndian(val))>);
    static_assert(U16ToNativeEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 4u> bytes = {0x12u, 0x34u, 0x56u, 0x78u};
    constexpr auto val = uint32_t{0x78'56'34'12u};
    EXPECT_EQ(U32ToNativeEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 4u>,
                               decltype(U32ToNativeEndian(val))>);
    static_assert(U32ToNativeEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 8u> bytes = {0x12u, 0x34u, 0x56u, 0x78u,
                                               0x90u, 0x12u, 0x34u, 0x56u};
    constexpr auto val = uint64_t{0x56'34'12'90'78'56'34'12u};
    EXPECT_EQ(U64ToNativeEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 8u>,
                               decltype(U64ToNativeEndian(val))>);
    static_assert(U64ToNativeEndian(val) == bytes);
  }

  {
    constexpr std::array<uint8_t, 1u> bytes = {0x12u};
    constexpr auto val = int8_t{0x12};
    EXPECT_EQ(I8ToNativeEndian(val), bytes);
    static_assert(
        std::same_as<std::array<uint8_t, 1u>, decltype(I8ToNativeEndian(val))>);
    static_assert(I8ToNativeEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 2u> bytes = {0x12u, 0x34u};
    constexpr auto val = int16_t{0x34'12};
    EXPECT_EQ(I16ToNativeEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 2u>,
                               decltype(I16ToNativeEndian(val))>);
    static_assert(I16ToNativeEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 4u> bytes = {0x12u, 0x34u, 0x56u, 0x78u};
    constexpr auto val = int32_t{0x78'56'34'12};
    EXPECT_EQ(I32ToNativeEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 4u>,
                               decltype(I32ToNativeEndian(val))>);
    static_assert(I32ToNativeEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 8u> bytes = {0x12u, 0x34u, 0x56u, 0x78u,
                                               0x90u, 0x12u, 0x34u, 0x56u};
    constexpr auto val = int64_t{0x56'34'12'90'78'56'34'12};
    EXPECT_EQ(I64ToNativeEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 8u>,
                               decltype(I64ToNativeEndian(val))>);
    static_assert(I64ToNativeEndian(val) == bytes);
  }

  {
    constexpr std::array<uint8_t, 4u> bytes = {0x12u, 0x34u, 0x56u, 0x78u};
    constexpr float val = 1.73782443614e+34f;
    EXPECT_EQ(FloatToNativeEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 4u>,
                               decltype(FloatToNativeEndian(val))>);
    static_assert(FloatToNativeEndian(val) == bytes);
  }

  {
    constexpr std::array<uint8_t, 8u> bytes = {0x12u, 0x34u, 0x56u, 0x78u,
                                               0x90u, 0x12u, 0x34u, 0x56u};
    constexpr double val = 1.84145159269283616391989849435e107;
    EXPECT_EQ(DoubleToNativeEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 8u>,
                               decltype(DoubleToNativeEndian(val))>);
    static_assert(DoubleToNativeEndian(val) == bytes);
  }
}
TEST(NumericsTest, ToLittleEndian) {
  // The implementation of ToNativeEndian and ToLittleEndian assumes the native
  // endian is little. If support of big endian is desired, compile-time
  // branches will need to be added to the implementation, and the test results
  // will differ there (they would match ToBigEndian in this test).
  static_assert(std::endian::native == std::endian::little);
  {
    constexpr std::array<uint8_t, 1u> bytes = {0x12u};
    constexpr auto val = uint8_t{0x12u};
    EXPECT_EQ(U8ToLittleEndian(val), bytes);
    static_assert(
        std::same_as<std::array<uint8_t, 1u>, decltype(U8ToLittleEndian(val))>);
    static_assert(U8ToLittleEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 2u> bytes = {0x12u, 0x34u};
    constexpr auto val = uint16_t{0x34'12u};
    EXPECT_EQ(U16ToLittleEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 2u>,
                               decltype(U16ToLittleEndian(val))>);
    static_assert(U16ToLittleEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 4u> bytes = {0x12u, 0x34u, 0x56u, 0x78u};
    constexpr auto val = uint32_t{0x78'56'34'12u};
    EXPECT_EQ(U32ToLittleEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 4u>,
                               decltype(U32ToLittleEndian(val))>);
    static_assert(U32ToLittleEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 8u> bytes = {0x12u, 0x34u, 0x56u, 0x78u,
                                               0x90u, 0x12u, 0x34u, 0x56u};
    constexpr auto val = uint64_t{0x56'34'12'90'78'56'34'12u};
    EXPECT_EQ(U64ToLittleEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 8u>,
                               decltype(U64ToLittleEndian(val))>);
    static_assert(U64ToLittleEndian(val) == bytes);
  }

  {
    constexpr std::array<uint8_t, 1u> bytes = {0x12u};
    constexpr auto val = int8_t{0x12};
    EXPECT_EQ(I8ToLittleEndian(val), bytes);
    static_assert(
        std::same_as<std::array<uint8_t, 1u>, decltype(I8ToLittleEndian(val))>);
    static_assert(I8ToLittleEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 2u> bytes = {0x12u, 0x34u};
    constexpr auto val = int16_t{0x34'12};
    EXPECT_EQ(I16ToLittleEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 2u>,
                               decltype(I16ToLittleEndian(val))>);
    static_assert(I16ToLittleEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 4u> bytes = {0x12u, 0x34u, 0x56u, 0x78u};
    constexpr auto val = int32_t{0x78'56'34'12};
    EXPECT_EQ(I32ToLittleEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 4u>,
                               decltype(I32ToLittleEndian(val))>);
    static_assert(I32ToLittleEndian(val) == bytes);
  }
  {
    constexpr std::array<uint8_t, 8u> bytes = {0x12u, 0x34u, 0x56u, 0x78u,
                                               0x90u, 0x12u, 0x34u, 0x56u};
    constexpr auto val = int64_t{0x56'34'12'90'78'56'34'12};
    EXPECT_EQ(I64ToLittleEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 8u>,
                               decltype(I64ToLittleEndian(val))>);
    static_assert(I64ToLittleEndian(val) == bytes);
  }

  {
    constexpr std::array<uint8_t, 4u> bytes = {0x12u, 0x34u, 0x56u, 0x78u};
    constexpr float val = 1.73782443614e+34f;
    EXPECT_EQ(FloatToLittleEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 4u>,
                               decltype(FloatToLittleEndian(val))>);
    static_assert(FloatToLittleEndian(val) == bytes);
  }

  {
    constexpr std::array<uint8_t, 8u> bytes = {0x12u, 0x34u, 0x56u, 0x78u,
                                               0x90u, 0x12u, 0x34u, 0x56u};
    constexpr double val = 1.84145159269283616391989849435e107;
    EXPECT_EQ(DoubleToLittleEndian(val), bytes);
    static_assert(std::same_as<std::array<uint8_t, 8u>,
                               decltype(DoubleToLittleEndian(val))>);
    static_assert(DoubleToLittleEndian(val) == bytes);
  }
}
TEST(NumericsTest, ToBigEndian) {
|
||||
// The implementation of ToBigEndian assumes the native endian is little. If
|
||||
// support of big endian is desired, compile-time branches will need to be
|
||||
// added to the implementation, and the test results will differ there (they
|
||||
// would match ToLittleEndian in this test).
|
||||
static_assert(std::endian::native == std::endian::little);
|
||||
{
|
||||
constexpr std::array<uint8_t, 1u> bytes = {0x12u};
|
||||
constexpr auto val = uint8_t{0x12u};
|
||||
EXPECT_EQ(U8ToBigEndian(val), bytes);
|
||||
static_assert(
|
||||
std::same_as<std::array<uint8_t, 1u>, decltype(U8ToBigEndian(val))>);
|
||||
static_assert(U8ToBigEndian(val) == bytes);
|
||||
}
|
||||
{
|
||||
constexpr std::array<uint8_t, 2u> bytes = {0x12u, 0x34u};
|
||||
constexpr auto val = uint16_t{0x12'34u};
|
||||
EXPECT_EQ(U16ToBigEndian(val), bytes);
|
||||
static_assert(
|
||||
std::same_as<std::array<uint8_t, 2u>, decltype(U16ToBigEndian(val))>);
|
||||
static_assert(U16ToBigEndian(val) == bytes);
|
||||
}
|
||||
{
|
||||
constexpr std::array<uint8_t, 4u> bytes = {0x12u, 0x34u, 0x56u, 0x78u};
|
||||
constexpr auto val = uint32_t{0x12'34'56'78u};
|
||||
EXPECT_EQ(U32ToBigEndian(val), bytes);
|
||||
static_assert(
|
||||
std::same_as<std::array<uint8_t, 4u>, decltype(U32ToBigEndian(val))>);
|
||||
static_assert(U32ToBigEndian(val) == bytes);
|
||||
}
|
||||
{
|
||||
constexpr std::array<uint8_t, 8u> bytes = {0x12u, 0x34u, 0x56u, 0x78u,
|
||||
0x90u, 0x12u, 0x34u, 0x56u};
|
||||
constexpr auto val = uint64_t{0x12'34'56'78'90'12'34'56u};
|
||||
EXPECT_EQ(U64ToBigEndian(val), bytes);
|
||||
static_assert(
|
||||
std::same_as<std::array<uint8_t, 8u>, decltype(U64ToBigEndian(val))>);
|
||||
static_assert(U64ToBigEndian(val) == bytes);
|
||||
}
|
||||
|
||||
{
|
||||
constexpr std::array<uint8_t, 1u> bytes = {0x12u};
|
||||
constexpr auto val = int8_t{0x12u};
|
||||
EXPECT_EQ(I8ToBigEndian(val), bytes);
|
||||
static_assert(
|
||||
std::same_as<std::array<uint8_t, 1u>, decltype(I8ToBigEndian(val))>);
|
||||
static_assert(I8ToBigEndian(val) == bytes);
|
||||
}
|
||||
{
|
||||
constexpr std::array<uint8_t, 2u> bytes = {0x12u, 0x34u};
|
||||
constexpr auto val = int16_t{0x12'34u};
|
||||
EXPECT_EQ(I16ToBigEndian(val), bytes);
|
||||
static_assert(
|
||||
std::same_as<std::array<uint8_t, 2u>, decltype(I16ToBigEndian(val))>);
|
||||
static_assert(I16ToBigEndian(val) == bytes);
|
||||
}
|
||||
{
|
||||
constexpr std::array<uint8_t, 4u> bytes = {0x12u, 0x34u, 0x56u, 0x78u};
|
||||
constexpr auto val = int32_t{0x12'34'56'78u};
|
||||
EXPECT_EQ(I32ToBigEndian(val), bytes);
|
||||
static_assert(
|
||||
std::same_as<std::array<uint8_t, 4u>, decltype(I32ToBigEndian(val))>);
|
||||
static_assert(I32ToBigEndian(val) == bytes);
|
||||
}
|
||||
{
|
||||
constexpr std::array<uint8_t, 8u> bytes = {0x12u, 0x34u, 0x56u, 0x78u,
|
||||
0x90u, 0x12u, 0x34u, 0x56u};
|
||||
constexpr auto val = int64_t{0x12'34'56'78'90'12'34'56u};
|
||||
EXPECT_EQ(I64ToBigEndian(val), bytes);
|
||||
static_assert(
|
||||
std::same_as<std::array<uint8_t, 8u>, decltype(I64ToBigEndian(val))>);
|
||||
static_assert(I64ToBigEndian(val) == bytes);
|
||||
}
|
||||
|
||||
{
|
||||
constexpr std::array<uint8_t, 4u> bytes = {0x12u, 0x34u, 0x56u, 0x78u};
|
||||
constexpr float val = 5.6904566139e-28f;
|
||||
EXPECT_EQ(FloatToBigEndian(val), bytes);
|
||||
static_assert(
|
||||
std::same_as<std::array<uint8_t, 4u>, decltype(FloatToBigEndian(val))>);
|
||||
static_assert(FloatToBigEndian(val) == bytes);
|
||||
}
|
||||
|
||||
{
|
||||
constexpr std::array<uint8_t, 8u> bytes = {0x12u, 0x34u, 0x56u, 0x78u,
|
||||
0x90u, 0x12u, 0x34u, 0x56u};
|
||||
constexpr double val = 5.62634909901491201382066931077e-221;
|
||||
EXPECT_EQ(DoubleToBigEndian(val), bytes);
|
||||
static_assert(std::same_as<std::array<uint8_t, 8u>,
|
||||
decltype(DoubleToBigEndian(val))>);
|
||||
static_assert(DoubleToBigEndian(val) == bytes);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace v8::base::numerics
375
deps/v8/src/base/numerics/checked_math.h
vendored
Normal file

@@ -0,0 +1,375 @@
// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_CHECKED_MATH_H_
#define V8_BASE_NUMERICS_CHECKED_MATH_H_

#include <stdint.h>

#include <limits>
#include <type_traits>

#include "src/base/numerics/checked_math_impl.h"  // IWYU pragma: export
#include "src/base/numerics/safe_conversions.h"
#include "src/base/numerics/safe_math_shared_impl.h"  // IWYU pragma: export

namespace v8::base {
namespace internal {

template <typename T>
  requires std::is_arithmetic_v<T>
class CheckedNumeric {
 public:
  using type = T;

  constexpr CheckedNumeric() = default;

  // Copy constructor.
  template <typename Src>
  constexpr CheckedNumeric(const CheckedNumeric<Src>& rhs)
      : state_(rhs.state_.value(), rhs.IsValid()) {}

  // This is not an explicit constructor because we implicitly upgrade regular
  // numerics to CheckedNumerics to make them easier to use.
  template <typename Src>
    requires(std::is_arithmetic_v<Src>)
  // NOLINTNEXTLINE(runtime/explicit)
  constexpr CheckedNumeric(Src value) : state_(value) {}

  // This is not an explicit constructor because we want a seamless conversion
  // from StrictNumeric types.
  template <typename Src>
  // NOLINTNEXTLINE(runtime/explicit)
  constexpr CheckedNumeric(StrictNumeric<Src> value)
      : state_(static_cast<Src>(value)) {}

  // IsValid() - The public API to test if a CheckedNumeric is currently valid.
  // A range checked destination type can be supplied using the Dst template
  // parameter.
  template <typename Dst = T>
  constexpr bool IsValid() const {
    return state_.is_valid() &&
           IsValueInRangeForNumericType<Dst>(state_.value());
  }

  // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
  // and is within the range supported by the destination type. Returns true if
  // successful and false otherwise.
  template <typename Dst>
#if defined(__clang__) || defined(__GNUC__)
  __attribute__((warn_unused_result))
#elif defined(_MSC_VER)
  _Check_return_
#endif
  constexpr bool
  AssignIfValid(Dst* result) const {
    if (IsValid<Dst>()) [[likely]] {
      *result = static_cast<Dst>(state_.value());
      return true;
    }
    return false;
  }

  // ValueOrDie() - The primary accessor for the underlying value. If the
  // current state is not valid it will CHECK and crash.
  // A range checked destination type can be supplied using the Dst template
  // parameter, which will trigger a CHECK if the value is not in bounds for
  // the destination.
  // The CHECK behavior can be overridden by supplying a handler as a
  // template parameter, for test code, etc. However, the handler cannot access
  // the underlying value, and it is not available through other means.
  template <typename Dst = T, class CheckHandler = CheckOnFailure>
  constexpr StrictNumeric<Dst> ValueOrDie() const {
    if (IsValid<Dst>()) [[likely]] {
      return static_cast<Dst>(state_.value());
    }
    return CheckHandler::template HandleFailure<Dst>();
  }

  // ValueOrDefault(T default_value) - A convenience method that returns the
  // current value if the state is valid, and the supplied default_value for
  // any other state.
  // A range checked destination type can be supplied using the Dst template
  // parameter. WARNING: This function may fail to compile or CHECK at runtime
  // if the supplied default_value is not within range of the destination type.
  template <typename Dst = T, typename Src>
  constexpr StrictNumeric<Dst> ValueOrDefault(Src default_value) const {
    if (IsValid<Dst>()) [[likely]] {
      return static_cast<Dst>(state_.value());
    }
    return checked_cast<Dst>(default_value);
  }

  // Returns a checked numeric of the specified type, cast from the current
  // CheckedNumeric. If the current state is invalid or the destination cannot
  // represent the result then the returned CheckedNumeric will be invalid.
  template <typename Dst>
  constexpr CheckedNumeric<UnderlyingType<Dst>> Cast() const {
    return *this;
  }

  // This friend method is available solely for providing more detailed logging
  // in the tests. Do not implement it in production code, because the
  // underlying values may change at any time.
  template <typename U>
  friend U GetNumericValueForTest(const CheckedNumeric<U>& src);

  // Prototypes for the supported arithmetic operator overloads.
  template <typename Src>
  constexpr CheckedNumeric& operator+=(const Src rhs);
  template <typename Src>
  constexpr CheckedNumeric& operator-=(const Src rhs);
  template <typename Src>
  constexpr CheckedNumeric& operator*=(const Src rhs);
  template <typename Src>
  constexpr CheckedNumeric& operator/=(const Src rhs);
  template <typename Src>
  constexpr CheckedNumeric& operator%=(const Src rhs);
  template <typename Src>
  constexpr CheckedNumeric& operator<<=(const Src rhs);
  template <typename Src>
  constexpr CheckedNumeric& operator>>=(const Src rhs);
  template <typename Src>
  constexpr CheckedNumeric& operator&=(const Src rhs);
  template <typename Src>
  constexpr CheckedNumeric& operator|=(const Src rhs);
  template <typename Src>
  constexpr CheckedNumeric& operator^=(const Src rhs);

  constexpr CheckedNumeric operator-() const {
    // Use an optimized code path for a known run-time variable.
    if (!std::is_constant_evaluated() && std::is_signed_v<T> &&
        std::is_floating_point_v<T>) {
      return FastRuntimeNegate();
    }
    // The negation of two's complement int min is int min.
    const bool is_valid =
        IsValid() &&
        (!std::is_signed_v<T> || std::is_floating_point_v<T> ||
         NegateWrapper(state_.value()) != std::numeric_limits<T>::lowest());
    return CheckedNumeric<T>(NegateWrapper(state_.value()), is_valid);
  }

  constexpr CheckedNumeric operator~() const {
    return CheckedNumeric<decltype(InvertWrapper(T()))>(
        InvertWrapper(state_.value()), IsValid());
  }

  constexpr CheckedNumeric Abs() const {
    return !IsValueNegative(state_.value()) ? *this : -*this;
  }

  template <typename U>
  constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(
      U rhs) const {
    return CheckMax(*this, rhs);
  }

  template <typename U>
  constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(
      U rhs) const {
    return CheckMin(*this, rhs);
  }

  // This function is available only for integral types. It returns an unsigned
  // integer of the same width as the source type, containing the absolute value
  // of the source, and properly handling signed min.
  constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>
  UnsignedAbs() const {
    return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
        SafeUnsignedAbs(state_.value()), state_.is_valid());
  }

  constexpr CheckedNumeric& operator++() {
    *this += 1;
    return *this;
  }

  constexpr CheckedNumeric operator++(int) {
    const CheckedNumeric value = *this;
    ++*this;
    return value;
  }

  constexpr CheckedNumeric& operator--() {
    *this -= 1;
    return *this;
  }

  constexpr CheckedNumeric operator--(int) {
    const CheckedNumeric value = *this;
    --*this;
    return value;
  }

  // These perform the actual math operations on the CheckedNumerics.
  // Binary arithmetic operations.
  template <template <typename, typename> class M, typename L, typename R>
  static constexpr CheckedNumeric MathOp(L lhs, R rhs) {
    using Math = typename MathWrapper<M, L, R>::math;
    T result = 0;
    const bool is_valid =
        Wrapper<L>::is_valid(lhs) && Wrapper<R>::is_valid(rhs) &&
        Math::Do(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs), &result);
    return CheckedNumeric<T>(result, is_valid);
  }

  // Assignment arithmetic operations.
  template <template <typename, typename> class M, typename R>
  constexpr CheckedNumeric& MathOp(R rhs) {
    using Math = typename MathWrapper<M, T, R>::math;
    T result = 0;  // Using T as the destination saves a range check.
    const bool is_valid =
        state_.is_valid() && Wrapper<R>::is_valid(rhs) &&
        Math::Do(state_.value(), Wrapper<R>::value(rhs), &result);
    *this = CheckedNumeric<T>(result, is_valid);
    return *this;
  }

 private:
  template <typename U>
    requires std::is_arithmetic_v<U>
  friend class CheckedNumeric;

  CheckedNumericState<T> state_;

  CheckedNumeric FastRuntimeNegate() const {
    T result;
    const bool success = CheckedSubOp<T, T>::Do(T(0), state_.value(), &result);
    return CheckedNumeric<T>(result, IsValid() && success);
  }

  template <typename Src>
  constexpr CheckedNumeric(Src value, bool is_valid)
      : state_(value, is_valid) {}

  // These wrappers allow us to handle state the same way for both
  // CheckedNumeric and POD arithmetic types.
  template <typename Src>
  struct Wrapper {
    static constexpr bool is_valid(Src) { return true; }
    static constexpr Src value(Src value) { return value; }
  };

  template <typename Src>
  struct Wrapper<CheckedNumeric<Src>> {
    static constexpr bool is_valid(CheckedNumeric<Src> v) {
      return v.IsValid();
    }
    static constexpr Src value(CheckedNumeric<Src> v) {
      return v.state_.value();
    }
  };

  template <typename Src>
  struct Wrapper<StrictNumeric<Src>> {
    static constexpr bool is_valid(StrictNumeric<Src>) { return true; }
    static constexpr Src value(StrictNumeric<Src> v) {
      return static_cast<Src>(v);
    }
  };
};

template <typename T>
CheckedNumeric(T) -> CheckedNumeric<T>;

// Convenience functions to avoid the ugly template disambiguator syntax.
template <typename Dst, typename Src>
constexpr bool IsValidForType(const CheckedNumeric<Src> value) {
  return value.template IsValid<Dst>();
}

template <typename Dst, typename Src>
constexpr StrictNumeric<Dst> ValueOrDieForType(
    const CheckedNumeric<Src> value) {
  return value.template ValueOrDie<Dst>();
}

template <typename Dst, typename Src, typename Default>
constexpr StrictNumeric<Dst> ValueOrDefaultForType(CheckedNumeric<Src> value,
                                                   Default default_value) {
  return value.template ValueOrDefault<Dst>(default_value);
}

// Convenience wrapper to return a new CheckedNumeric from the provided
// arithmetic or CheckedNumericType.
template <typename T>
constexpr CheckedNumeric<UnderlyingType<T>> MakeCheckedNum(T value) {
  return value;
}

// These implement the variadic wrapper for the math operations.
template <template <typename, typename> class M, typename L, typename R>
constexpr CheckedNumeric<typename MathWrapper<M, L, R>::type> CheckMathOp(
    L lhs, R rhs) {
  using Math = typename MathWrapper<M, L, R>::math;
  return CheckedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
                                                                        rhs);
}

// General purpose wrapper template for arithmetic operations.
template <template <typename, typename> class M, typename L, typename R,
          typename... Args>
constexpr auto CheckMathOp(L lhs, R rhs, Args... args) {
  return CheckMathOp<M>(CheckMathOp<M>(lhs, rhs), args...);
}

BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Add, +, +=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Sub, -, -=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mul, *, *=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Div, /, /=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mod, %, %=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Lsh, <<, <<=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Rsh, >>, >>=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, And, &, &=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Or, |, |=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Xor, ^, ^=)
BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Max)
BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Min)

// These are some extra StrictNumeric operators to support simple pointer
// arithmetic with our result types. Since wrapping on a pointer is always
// bad, we trigger the CHECK condition here.
template <typename L, typename R>
L* operator+(L* lhs, StrictNumeric<R> rhs) {
  const uintptr_t result = CheckAdd(reinterpret_cast<uintptr_t>(lhs),
                                    CheckMul(sizeof(L), static_cast<R>(rhs)))
                               .template ValueOrDie<uintptr_t>();
  return reinterpret_cast<L*>(result);
}

template <typename L, typename R>
L* operator-(L* lhs, StrictNumeric<R> rhs) {
  const uintptr_t result = CheckSub(reinterpret_cast<uintptr_t>(lhs),
                                    CheckMul(sizeof(L), static_cast<R>(rhs)))
                               .template ValueOrDie<uintptr_t>();
  return reinterpret_cast<L*>(result);
}

}  // namespace internal

using internal::CheckAdd;
using internal::CheckAnd;
using internal::CheckDiv;
using internal::CheckedNumeric;
using internal::CheckLsh;
using internal::CheckMax;
using internal::CheckMin;
using internal::CheckMod;
using internal::CheckMul;
using internal::CheckOr;
using internal::CheckRsh;
using internal::CheckSub;
using internal::CheckXor;
using internal::IsValidForType;
using internal::MakeCheckedNum;
using internal::ValueOrDefaultForType;
using internal::ValueOrDieForType;

}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_CHECKED_MATH_H_
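Aside (not part of the diff): a sketch of how the API above is meant to be used, with CheckAdd, CheckedNumeric, and AssignIfValid as declared in this header; the surrounding function is hypothetical:

#include <cstdint>

#include "src/base/numerics/checked_math.h"

// Overflow invalidates the CheckedNumeric instead of trapping immediately;
// the failure surfaces only when the value is extracted.
bool TryIncrementMax(int32_t* out) {
  v8::base::CheckedNumeric<int32_t> sum = v8::base::CheckAdd(INT32_MAX, 1);
  return sum.AssignIfValid(out);  // false: the addition overflowed int32_t.
}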
574
deps/v8/src/base/numerics/checked_math_impl.h
vendored
Normal file

@@ -0,0 +1,574 @@
// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_CHECKED_MATH_IMPL_H_
#define V8_BASE_NUMERICS_CHECKED_MATH_IMPL_H_

// IWYU pragma: private, include "src/base/numerics/checked_math.h"

#include <stdint.h>

#include <cmath>
#include <concepts>
#include <limits>
#include <type_traits>

#include "src/base/numerics/safe_conversions.h"
#include "src/base/numerics/safe_math_shared_impl.h"  // IWYU pragma: export

namespace v8::base {
namespace internal {

template <typename T>
constexpr bool CheckedAddImpl(T x, T y, T* result) {
  static_assert(std::integral<T>, "Type must be integral");
  // Since the value of x+y is undefined if we have a signed type, we compute
  // it using the unsigned type of the same size.
  using UnsignedDst = typename std::make_unsigned<T>::type;
  using SignedDst = typename std::make_signed<T>::type;
  const UnsignedDst ux = static_cast<UnsignedDst>(x);
  const UnsignedDst uy = static_cast<UnsignedDst>(y);
  const UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
  // Addition is valid if the sign of (x + y) is equal to either that of x or
  // that of y.
  if (std::is_signed_v<T>
          ? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) < 0
          : uresult < uy) {  // Unsigned is either valid or underflow.
    return false;
  }
  *result = static_cast<T>(uresult);
  return true;
}

template <typename T, typename U>
struct CheckedAddOp {};

template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct CheckedAddOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    if constexpr (CheckedAddFastOp<T, U>::is_supported) {
      return CheckedAddFastOp<T, U>::Do(x, y, result);
    }

    // Double the underlying type up to a full machine word.
    using FastPromotion = FastIntegerArithmeticPromotion<T, U>;
    using Promotion =
        std::conditional_t<(kIntegerBitsPlusSign<FastPromotion> >
                            kIntegerBitsPlusSign<intptr_t>),
                           BigEnoughPromotion<T, U>, FastPromotion>;
    // Fail if either operand is out of range for the promoted type.
    // TODO(jschuh): This could be made to work for a broader range of values.
    if (!IsValueInRangeForNumericType<Promotion>(x) ||
        !IsValueInRangeForNumericType<Promotion>(y)) [[unlikely]] {
      return false;
    }

    Promotion presult = {};
    bool is_valid = true;
    if constexpr (kIsIntegerArithmeticSafe<Promotion, T, U>) {
      presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
    } else {
      is_valid = CheckedAddImpl(static_cast<Promotion>(x),
                                static_cast<Promotion>(y), &presult);
    }
    if (!is_valid || !IsValueInRangeForNumericType<V>(presult)) {
      return false;
    }
    *result = static_cast<V>(presult);
    return true;
  }
};

template <typename T>
constexpr bool CheckedSubImpl(T x, T y, T* result) {
  static_assert(std::integral<T>, "Type must be integral");
  // Since the value of x+y is undefined if we have a signed type, we compute
  // it using the unsigned type of the same size.
  using UnsignedDst = typename std::make_unsigned<T>::type;
  using SignedDst = typename std::make_signed<T>::type;
  const UnsignedDst ux = static_cast<UnsignedDst>(x);
  const UnsignedDst uy = static_cast<UnsignedDst>(y);
  const UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
  // Subtraction is valid if either x and y have same sign, or (x-y) and x have
  // the same sign.
  if (std::is_signed_v<T>
          ? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) < 0
          : x < y) {
    return false;
  }
  *result = static_cast<T>(uresult);
  return true;
}

template <typename T, typename U>
struct CheckedSubOp {};

template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct CheckedSubOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    if constexpr (CheckedSubFastOp<T, U>::is_supported) {
      return CheckedSubFastOp<T, U>::Do(x, y, result);
    }

    // Double the underlying type up to a full machine word.
    using FastPromotion = FastIntegerArithmeticPromotion<T, U>;
    using Promotion =
        std::conditional_t<(kIntegerBitsPlusSign<FastPromotion> >
                            kIntegerBitsPlusSign<intptr_t>),
                           BigEnoughPromotion<T, U>, FastPromotion>;
    // Fail if either operand is out of range for the promoted type.
    // TODO(jschuh): This could be made to work for a broader range of values.
    if (!IsValueInRangeForNumericType<Promotion>(x) ||
        !IsValueInRangeForNumericType<Promotion>(y)) [[unlikely]] {
      return false;
    }

    Promotion presult = {};
    bool is_valid = true;
    if constexpr (kIsIntegerArithmeticSafe<Promotion, T, U>) {
      presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
    } else {
      is_valid = CheckedSubImpl(static_cast<Promotion>(x),
                                static_cast<Promotion>(y), &presult);
    }
    if (!is_valid || !IsValueInRangeForNumericType<V>(presult)) {
      return false;
    }
    *result = static_cast<V>(presult);
    return true;
  }
};

template <typename T>
constexpr bool CheckedMulImpl(T x, T y, T* result) {
  static_assert(std::integral<T>, "Type must be integral");
  // Since the value of x*y is potentially undefined if we have a signed type,
  // we compute it using the unsigned type of the same size.
  using UnsignedDst = typename std::make_unsigned<T>::type;
  using SignedDst = typename std::make_signed<T>::type;
  const UnsignedDst ux = SafeUnsignedAbs(x);
  const UnsignedDst uy = SafeUnsignedAbs(y);
  const UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
  const bool is_negative =
      std::is_signed_v<T> && static_cast<SignedDst>(x ^ y) < 0;
  // We have a fast out for unsigned identity or zero on the second operand.
  // After that it's an unsigned overflow check on the absolute value, with
  // a +1 bound for a negative result.
  if (uy > UnsignedDst(!std::is_signed_v<T> || is_negative) &&
      ux > (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy) {
    return false;
  }
  *result = static_cast<T>(is_negative ? 0 - uresult : uresult);
  return true;
}
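// Illustrative aside, not part of the upstream file: tracing CheckedMulImpl
// for int8_t x = -16, y = 8 gives ux = 16, uy = 8, is_negative = true, and
// the bound check compares ux against (127 + 1) / 8 = 16; since 16 > 16 is
// false the product is valid, and 0 - uresult yields the correct -128.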

template <typename T, typename U>
struct CheckedMulOp {};

template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct CheckedMulOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    if constexpr (CheckedMulFastOp<T, U>::is_supported) {
      return CheckedMulFastOp<T, U>::Do(x, y, result);
    }

    using Promotion = FastIntegerArithmeticPromotion<T, U>;
    // Verify the destination type can hold the result (always true for 0).
    if ((!IsValueInRangeForNumericType<Promotion>(x) ||
         !IsValueInRangeForNumericType<Promotion>(y)) &&
        x && y) [[unlikely]] {
      return false;
    }

    Promotion presult = {};
    bool is_valid = true;
    if constexpr (CheckedMulFastOp<Promotion, Promotion>::is_supported) {
      // The fast op may be available with the promoted type.
      // The casts here are safe because of the "value in range" conditional
      // above.
      is_valid = CheckedMulFastOp<Promotion, Promotion>::Do(
          static_cast<Promotion>(x), static_cast<Promotion>(y), &presult);
    } else if constexpr (kIsIntegerArithmeticSafe<Promotion, T, U>) {
      presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
    } else {
      is_valid = CheckedMulImpl(static_cast<Promotion>(x),
                                static_cast<Promotion>(y), &presult);
    }
    if (!is_valid || !IsValueInRangeForNumericType<V>(presult)) {
      return false;
    }
    *result = static_cast<V>(presult);
    return true;
  }
};

// Division just requires a check for a zero denominator or an invalid negation
// on signed min/-1.
template <typename T, typename U>
struct CheckedDivOp {};

template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct CheckedDivOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    if (!y) [[unlikely]] {
      return false;
    }

    // The overflow check can be compiled away if we don't have the exact
    // combination of types needed to trigger this case.
    using Promotion = BigEnoughPromotion<T, U>;
    if (std::is_signed_v<T> && std::is_signed_v<U> &&
        kIsTypeInRangeForNumericType<T, Promotion> &&
        static_cast<Promotion>(x) == std::numeric_limits<Promotion>::lowest() &&
        y == static_cast<U>(-1)) [[unlikely]] {
      return false;
    }

    // This branch always compiles away if the above branch wasn't removed.
    if ((!IsValueInRangeForNumericType<Promotion>(x) ||
         !IsValueInRangeForNumericType<Promotion>(y)) &&
        x) [[unlikely]] {
      return false;
    }

    const Promotion presult = Promotion(x) / Promotion(y);
    if (!IsValueInRangeForNumericType<V>(presult)) {
      return false;
    }
    *result = static_cast<V>(presult);
    return true;
  }
};

template <typename T, typename U>
struct CheckedModOp {};

template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct CheckedModOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    if (!y) [[unlikely]] {
      return false;
    }

    using Promotion = BigEnoughPromotion<T, U>;
    if (std::is_signed_v<T> && std::is_signed_v<U> &&
        kIsTypeInRangeForNumericType<T, Promotion> &&
        static_cast<Promotion>(x) == std::numeric_limits<Promotion>::lowest() &&
        y == static_cast<U>(-1)) [[unlikely]] {
      *result = 0;
      return true;
    }

    const Promotion presult =
        static_cast<Promotion>(x) % static_cast<Promotion>(y);
    if (!IsValueInRangeForNumericType<V>(presult)) {
      return false;
    }
    *result = static_cast<Promotion>(presult);
    return true;
  }
};

template <typename T, typename U>
struct CheckedLshOp {};

// Left shift. Shifts less than 0 or greater than or equal to the number
// of bits in the promoted type are undefined. Shifts of negative values
// are undefined. Otherwise it is defined when the result fits.
template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct CheckedLshOp<T, U> {
  using result_type = T;
  template <typename V>
  static constexpr bool Do(T x, U shift, V* result) {
    // Disallow negative numbers and verify the shift is in bounds.
    if (!IsValueNegative(x) &&
        as_unsigned(shift) < as_unsigned(std::numeric_limits<T>::digits))
        [[likely]] {
      // Shift as unsigned to avoid undefined behavior.
      *result = static_cast<V>(as_unsigned(x) << shift);
      // If the shift can be reversed, we know it was valid.
      return *result >> shift == x;
    }

    // Handle the legal corner-case of a full-width signed shift of zero.
    if (!std::is_signed_v<T> || x ||
        as_unsigned(shift) != as_unsigned(std::numeric_limits<T>::digits)) {
      return false;
    }
    *result = 0;
    return true;
  }
};

template <typename T, typename U>
struct CheckedRshOp {};

// Right shift. Shifts less than 0 or greater than or equal to the number
// of bits in the promoted type are undefined. Otherwise, it is always defined,
// but a right shift of a negative value is implementation-dependent.
template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct CheckedRshOp<T, U> {
  using result_type = T;
  template <typename V>
  static constexpr bool Do(T x, U shift, V* result) {
    // Use sign conversion to push negative values out of range.
    if (as_unsigned(shift) >= kIntegerBitsPlusSign<T>) [[unlikely]] {
      return false;
    }

    const T tmp = x >> shift;
    if (!IsValueInRangeForNumericType<V>(tmp)) {
      return false;
    }
    *result = static_cast<V>(tmp);
    return true;
  }
};

template <typename T, typename U>
struct CheckedAndOp {};

// For simplicity we support only unsigned integer results.
template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct CheckedAndOp<T, U> {
  using result_type = std::make_unsigned_t<MaxExponentPromotion<T, U>>;
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    const result_type tmp =
        static_cast<result_type>(x) & static_cast<result_type>(y);
    if (!IsValueInRangeForNumericType<V>(tmp)) {
      return false;
    }
    *result = static_cast<V>(tmp);
    return true;
  }
};

template <typename T, typename U>
struct CheckedOrOp {};

// For simplicity we support only unsigned integers.
template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct CheckedOrOp<T, U> {
  using result_type = std::make_unsigned_t<MaxExponentPromotion<T, U>>;
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    const result_type tmp =
        static_cast<result_type>(x) | static_cast<result_type>(y);
    if (!IsValueInRangeForNumericType<V>(tmp)) {
      return false;
    }
    *result = static_cast<V>(tmp);
    return true;
  }
};

template <typename T, typename U>
struct CheckedXorOp {};

// For simplicity we support only unsigned integers.
template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct CheckedXorOp<T, U> {
  using result_type = std::make_unsigned_t<MaxExponentPromotion<T, U>>;
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    const result_type tmp =
        static_cast<result_type>(x) ^ static_cast<result_type>(y);
    if (!IsValueInRangeForNumericType<V>(tmp)) {
      return false;
    }
    *result = static_cast<V>(tmp);
    return true;
  }
};

// Max doesn't really need to be implemented this way because it can't fail,
// but it makes the code much cleaner to use the MathOp wrappers.
template <typename T, typename U>
struct CheckedMaxOp {};

template <typename T, typename U>
  requires(std::is_arithmetic_v<T> && std::is_arithmetic_v<U>)
struct CheckedMaxOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    const result_type tmp = IsGreater<T, U>::Test(x, y)
                                ? static_cast<result_type>(x)
                                : static_cast<result_type>(y);
    if (!IsValueInRangeForNumericType<V>(tmp)) {
      return false;
    }
    *result = static_cast<V>(tmp);
    return true;
  }
};

// Min doesn't really need to be implemented this way because it can't fail,
// but it makes the code much cleaner to use the MathOp wrappers.
template <typename T, typename U>
struct CheckedMinOp {};

template <typename T, typename U>
  requires(std::is_arithmetic_v<T> && std::is_arithmetic_v<U>)
struct CheckedMinOp<T, U> {
  using result_type = LowestValuePromotion<T, U>;
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    const result_type tmp = IsLess<T, U>::Test(x, y)
                                ? static_cast<result_type>(x)
                                : static_cast<result_type>(y);
    if (!IsValueInRangeForNumericType<V>(tmp)) {
      return false;
    }
    *result = static_cast<V>(tmp);
    return true;
  }
};

// This is just boilerplate that wraps the standard floating point arithmetic.
// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                        \
  template <typename T, typename U>                                \
    requires(std::floating_point<T> || std::floating_point<U>)     \
  struct Checked##NAME##Op<T, U> {                                 \
    using result_type = MaxExponentPromotion<T, U>;                \
    template <typename V>                                          \
    static constexpr bool Do(T x, U y, V* result) {                \
      const result_type presult = x OP y;                          \
      if (!IsValueInRangeForNumericType<V>(presult)) return false; \
      *result = static_cast<V>(presult);                           \
      return true;                                                 \
    }                                                              \
  };

BASE_FLOAT_ARITHMETIC_OPS(Add, +)
BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
BASE_FLOAT_ARITHMETIC_OPS(Div, /)

#undef BASE_FLOAT_ARITHMETIC_OPS

// Floats carry around their validity state with them, but integers do not. So,
// we wrap the underlying value in a specialization in order to hide that detail
// and expose an interface via accessors.
enum NumericRepresentation {
  NUMERIC_INTEGER,
  NUMERIC_FLOATING,
  NUMERIC_UNKNOWN
};

template <typename NumericType>
struct GetNumericRepresentation {
  static const NumericRepresentation value =
      std::integral<NumericType>
          ? NUMERIC_INTEGER
          : (std::floating_point<NumericType> ? NUMERIC_FLOATING
                                              : NUMERIC_UNKNOWN);
};

template <typename T,
          NumericRepresentation type = GetNumericRepresentation<T>::value>
class CheckedNumericState {};

// Integrals require quite a bit of additional housekeeping to manage state.
template <typename T>
class CheckedNumericState<T, NUMERIC_INTEGER> {
 public:
  template <typename Src = int>
  constexpr explicit CheckedNumericState(Src value = 0, bool is_valid = true)
      : is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
        value_(WellDefinedConversionOrZero(value, is_valid_)) {
    static_assert(std::is_arithmetic_v<Src>, "Argument must be numeric.");
  }

  template <typename Src>
  constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
      : CheckedNumericState(rhs.value(), rhs.is_valid()) {}

  constexpr bool is_valid() const { return is_valid_; }

  constexpr T value() const { return value_; }

 private:
  // Ensures that a type conversion does not trigger undefined behavior.
  template <typename Src>
  static constexpr T WellDefinedConversionOrZero(Src value, bool is_valid) {
    return (std::integral<UnderlyingType<Src>> || is_valid)
               ? static_cast<T>(value)
               : 0;
  }

  // is_valid_ precedes value_ because member initializers in the constructors
  // are evaluated in field order, and is_valid_ must be read when initializing
  // value_.
  bool is_valid_;
  T value_;
};

// Floating points maintain their own validity, but need translation wrappers.
template <typename T>
class CheckedNumericState<T, NUMERIC_FLOATING> {
 public:
  template <typename Src = double>
  constexpr explicit CheckedNumericState(Src value = 0.0, bool is_valid = true)
      : value_(WellDefinedConversionOrNaN(
            value, is_valid && IsValueInRangeForNumericType<T>(value))) {}

  template <typename Src>
  constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
      : CheckedNumericState(rhs.value(), rhs.is_valid()) {}

  constexpr bool is_valid() const {
    // Written this way because std::isfinite is not constexpr before C++23.
    // TODO(C++23): Use `std::isfinite()` unconditionally.
    return std::is_constant_evaluated()
               ? value_ <= std::numeric_limits<T>::max() &&
                     value_ >= std::numeric_limits<T>::lowest()
               : std::isfinite(value_);
  }

  constexpr T value() const { return value_; }

 private:
  // Ensures that a type conversion does not trigger undefined behavior.
  template <typename Src>
  static constexpr T WellDefinedConversionOrNaN(Src value, bool is_valid) {
    return (kStaticDstRangeRelationToSrcRange<T, UnderlyingType<Src>> ==
                NumericRangeRepresentation::kContained ||
            is_valid)
               ? static_cast<T>(value)
               : std::numeric_limits<T>::quiet_NaN();
  }

  T value_;
};

}  // namespace internal
}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_CHECKED_MATH_IMPL_H_
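Aside (not part of the diff): a standalone worked instance of the sign-bit overflow test used by CheckedAddImpl above; Overflows is a hypothetical helper mirroring that logic for int8_t:

#include <cstdint>

// (ur ^ ux) & (ur ^ uy) has its sign bit set exactly when the sum's sign
// differs from both operands' signs, i.e. when signed addition overflowed.
constexpr bool Overflows(int8_t x, int8_t y) {
  const uint8_t ux = static_cast<uint8_t>(x);
  const uint8_t uy = static_cast<uint8_t>(y);
  const uint8_t ur = static_cast<uint8_t>(ux + uy);
  return static_cast<int8_t>((ur ^ ux) & (ur ^ uy)) < 0;
}
static_assert(Overflows(100, 100));    // 200 does not fit in int8_t.
static_assert(!Overflows(100, -100));  // 0 fits.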
247
deps/v8/src/base/numerics/clamped_math.h
vendored
Normal file

@@ -0,0 +1,247 @@
// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_CLAMPED_MATH_H_
#define V8_BASE_NUMERICS_CLAMPED_MATH_H_

#include <type_traits>

#include "src/base/numerics/clamped_math_impl.h"  // IWYU pragma: export
#include "src/base/numerics/safe_conversions.h"
#include "src/base/numerics/safe_math_shared_impl.h"  // IWYU pragma: export

namespace v8::base {
namespace internal {

template <typename T>
  requires std::is_arithmetic_v<T>
class ClampedNumeric {
 public:
  using type = T;

  constexpr ClampedNumeric() = default;

  // Copy constructor.
  template <typename Src>
  constexpr ClampedNumeric(const ClampedNumeric<Src>& rhs)
      : value_(saturated_cast<T>(rhs.value_)) {}

  // This is not an explicit constructor because we implicitly upgrade regular
  // numerics to ClampedNumerics to make them easier to use.
  template <typename Src>
    requires(IsNumeric<Src>)
  // NOLINTNEXTLINE(runtime/explicit)
  constexpr ClampedNumeric(Src value) : value_(saturated_cast<T>(value)) {}

  // This is not an explicit constructor because we want a seamless conversion
  // from StrictNumeric types.
  template <typename Src>
  // NOLINTNEXTLINE(runtime/explicit)
  constexpr ClampedNumeric(StrictNumeric<Src> value)
      : value_(saturated_cast<T>(static_cast<Src>(value))) {}

  // Returns a ClampedNumeric of the specified type, cast from the current
  // ClampedNumeric, and saturated to the destination type.
  template <typename Dst>
  constexpr ClampedNumeric<UnderlyingType<Dst>> Cast() const {
    return *this;
  }

  // Prototypes for the supported arithmetic operator overloads.
  template <typename Src>
  constexpr ClampedNumeric& operator+=(const Src rhs);
  template <typename Src>
  constexpr ClampedNumeric& operator-=(const Src rhs);
  template <typename Src>
  constexpr ClampedNumeric& operator*=(const Src rhs);
  template <typename Src>
  constexpr ClampedNumeric& operator/=(const Src rhs);
  template <typename Src>
  constexpr ClampedNumeric& operator%=(const Src rhs);
  template <typename Src>
  constexpr ClampedNumeric& operator<<=(const Src rhs);
  template <typename Src>
  constexpr ClampedNumeric& operator>>=(const Src rhs);
  template <typename Src>
  constexpr ClampedNumeric& operator&=(const Src rhs);
  template <typename Src>
  constexpr ClampedNumeric& operator|=(const Src rhs);
  template <typename Src>
  constexpr ClampedNumeric& operator^=(const Src rhs);

  constexpr ClampedNumeric operator-() const {
    // The negation of two's complement int min is int min, so that's the
    // only overflow case where we will saturate.
    return ClampedNumeric<T>(SaturatedNegWrapper(value_));
  }

  constexpr ClampedNumeric operator~() const {
    return ClampedNumeric<decltype(InvertWrapper(T()))>(InvertWrapper(value_));
  }

  constexpr ClampedNumeric Abs() const {
    // The negation of two's complement int min is int min, so that's the
    // only overflow case where we will saturate.
    return ClampedNumeric<T>(SaturatedAbsWrapper(value_));
  }

  template <typename U>
  constexpr ClampedNumeric<typename MathWrapper<ClampedMaxOp, T, U>::type> Max(
      U rhs) const {
    using result_type = typename MathWrapper<ClampedMaxOp, T, U>::type;
    return ClampedNumeric<result_type>(
        ClampedMaxOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
  }

  template <typename U>
  constexpr ClampedNumeric<typename MathWrapper<ClampedMinOp, T, U>::type> Min(
      U rhs) const {
    using result_type = typename MathWrapper<ClampedMinOp, T, U>::type;
    return ClampedNumeric<result_type>(
        ClampedMinOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
  }

  // This function is available only for integral types. It returns an unsigned
  // integer of the same width as the source type, containing the absolute value
  // of the source, and properly handling signed min.
  constexpr ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>
  UnsignedAbs() const {
    return ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>(
        SafeUnsignedAbs(value_));
  }

  constexpr ClampedNumeric& operator++() {
    *this += 1;
    return *this;
  }

  constexpr ClampedNumeric operator++(int) {
    ClampedNumeric value = *this;
    *this += 1;
    return value;
  }

  constexpr ClampedNumeric& operator--() {
    *this -= 1;
    return *this;
  }

  constexpr ClampedNumeric operator--(int) {
    ClampedNumeric value = *this;
    *this -= 1;
    return value;
  }

  // These perform the actual math operations on the ClampedNumerics.
  // Binary arithmetic operations.
  template <template <typename, typename> class M, typename L, typename R>
  static constexpr ClampedNumeric MathOp(L lhs, R rhs) {
    using Math = typename MathWrapper<M, L, R>::math;
    return ClampedNumeric<T>(
        Math::template Do<T>(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs)));
  }

  // Assignment arithmetic operations.
  template <template <typename, typename> class M, typename R>
  constexpr ClampedNumeric& MathOp(R rhs) {
    using Math = typename MathWrapper<M, T, R>::math;
    *this =
        ClampedNumeric<T>(Math::template Do<T>(value_, Wrapper<R>::value(rhs)));
    return *this;
  }

  template <typename Dst>
    requires std::is_arithmetic_v<ArithmeticOrUnderlyingEnum<Dst>>
  constexpr operator Dst() const {  // NOLINT(runtime/explicit)
    return saturated_cast<ArithmeticOrUnderlyingEnum<Dst>>(value_);
  }

  // This method extracts the raw integer value without saturating it to the
  // destination type as the conversion operator does. This is useful when
  // e.g. assigning to an auto type or passing as a deduced template parameter.
  constexpr T RawValue() const { return value_; }

 private:
  template <typename U>
    requires std::is_arithmetic_v<U>
  friend class ClampedNumeric;

  T value_ = 0;

  // These wrappers allow us to handle state the same way for both
  // ClampedNumeric and POD arithmetic types.
  template <typename Src>
  struct Wrapper {
    static constexpr UnderlyingType<Src> value(Src value) { return value; }
  };
};

template <typename T>
ClampedNumeric(T) -> ClampedNumeric<T>;

// Convenience wrapper to return a new ClampedNumeric from the provided
// arithmetic or ClampedNumericType.
template <typename T>
constexpr ClampedNumeric<UnderlyingType<T>> MakeClampedNum(T value) {
  return value;
}

// These implement the variadic wrapper for the math operations.
template <template <typename, typename> class M, typename L, typename R>
constexpr ClampedNumeric<typename MathWrapper<M, L, R>::type> ClampMathOp(
    L lhs, R rhs) {
  using Math = typename MathWrapper<M, L, R>::math;
  return ClampedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
                                                                        rhs);
}

// General purpose wrapper template for arithmetic operations.
template <template <typename, typename> class M, typename L, typename R,
          typename... Args>
constexpr auto ClampMathOp(L lhs, R rhs, Args... args) {
  return ClampMathOp<M>(ClampMathOp<M>(lhs, rhs), args...);
}

BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Add, +, +=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Sub, -, -=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mul, *, *=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Div, /, /=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mod, %, %=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Lsh, <<, <<=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Rsh, >>, >>=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, And, &, &=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Or, |, |=)
BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Xor, ^, ^=)
BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Max)
BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Min)
BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLess, <)
BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLessOrEqual, <=)
BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreater, >)
BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreaterOrEqual, >=)
BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsEqual, ==)
BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsNotEqual, !=)

}  // namespace internal

using internal::ClampAdd;
using internal::ClampAnd;
using internal::ClampDiv;
using internal::ClampedNumeric;
using internal::ClampLsh;
using internal::ClampMax;
using internal::ClampMin;
using internal::ClampMod;
using internal::ClampMul;
using internal::ClampOr;
using internal::ClampRsh;
using internal::ClampSub;
using internal::ClampXor;
using internal::MakeClampedNum;

}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_CLAMPED_MATH_H_
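Aside (not part of the diff): the clamped counterpart of the checked sketch earlier; ClampAdd is exported by this header, and the surrounding function is hypothetical:

#include <cstdint>

#include "src/base/numerics/clamped_math.h"

// Clamped math never fails: results saturate at the destination's bounds,
// so INT32_MAX + 1 stays pinned at INT32_MAX.
int32_t SaturatingIncrementMax() {
  return v8::base::ClampAdd(INT32_MAX, 1);  // INT32_MAX
}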
304
deps/v8/src/base/numerics/clamped_math_impl.h
vendored
Normal file

@@ -0,0 +1,304 @@
// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
#define V8_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_

// IWYU pragma: private, include "src/base/numerics/clamped_math.h"

#include <concepts>
#include <limits>
#include <type_traits>

#include "src/base/numerics/checked_math.h"
#include "src/base/numerics/safe_conversions.h"
#include "src/base/numerics/safe_math_shared_impl.h"  // IWYU pragma: export

namespace v8::base {
namespace internal {

template <typename T>
  requires(std::signed_integral<T>)
constexpr T SaturatedNegWrapper(T value) {
  return std::is_constant_evaluated() || !ClampedNegFastOp<T>::is_supported
             ? (NegateWrapper(value) != std::numeric_limits<T>::lowest()
                    ? NegateWrapper(value)
                    : std::numeric_limits<T>::max())
             : ClampedNegFastOp<T>::Do(value);
}

template <typename T>
  requires(std::unsigned_integral<T>)
constexpr T SaturatedNegWrapper(T value) {
  return T(0);
}

template <typename T>
  requires(std::floating_point<T>)
constexpr T SaturatedNegWrapper(T value) {
  return -value;
}

template <typename T>
  requires(std::integral<T>)
constexpr T SaturatedAbsWrapper(T value) {
  // The calculation below is a static identity for unsigned types, but for
  // signed integer types it provides a non-branching, saturated absolute value.
  // This works because SafeUnsignedAbs() returns an unsigned type, which can
  // represent the absolute value of all negative numbers of an equal-width
  // integer type. The call to IsValueNegative() then detects overflow in the
  // special case of numeric_limits<T>::min(), by evaluating the bit pattern as
  // a signed integer value. If it is the overflow case, we end up subtracting
  // one from the unsigned result, thus saturating to numeric_limits<T>::max().
  return static_cast<T>(
      SafeUnsignedAbs(value) -
      IsValueNegative<T>(static_cast<T>(SafeUnsignedAbs(value))));
}
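// Illustrative aside, not part of the upstream file: for int8_t the branchless
// expression above maps -5 to SafeUnsignedAbs(-5) - 0 = 5, and the overflow
// case -128 to SafeUnsignedAbs(-128) - 1 = 127, i.e. it saturates to
// numeric_limits<int8_t>::max() exactly as the comment describes.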

template <typename T>
  requires(std::floating_point<T>)
constexpr T SaturatedAbsWrapper(T value) {
  return value < 0 ? -value : value;
}

template <typename T, typename U>
struct ClampedAddOp {};

template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct ClampedAddOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V = result_type>
    requires(std::same_as<V, result_type> || kIsTypeInRangeForNumericType<U, V>)
  static constexpr V Do(T x, U y) {
    if (!std::is_constant_evaluated() && ClampedAddFastOp<T, U>::is_supported) {
      return ClampedAddFastOp<T, U>::template Do<V>(x, y);
    }
    const V saturated = CommonMaxOrMin<V>(IsValueNegative(y));
    V result = {};
    if (CheckedAddOp<T, U>::Do(x, y, &result)) [[likely]] {
      return result;
    }
    return saturated;
  }
};

template <typename T, typename U>
struct ClampedSubOp {};

template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct ClampedSubOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V = result_type>
    requires(std::same_as<V, result_type> || kIsTypeInRangeForNumericType<U, V>)
  static constexpr V Do(T x, U y) {
    if (!std::is_constant_evaluated() && ClampedSubFastOp<T, U>::is_supported) {
      return ClampedSubFastOp<T, U>::template Do<V>(x, y);
    }
    const V saturated = CommonMaxOrMin<V>(!IsValueNegative(y));
    V result = {};
    if (CheckedSubOp<T, U>::Do(x, y, &result)) [[likely]] {
      return result;
    }
    return saturated;
  }
};

template <typename T, typename U>
struct ClampedMulOp {};

template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct ClampedMulOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V = result_type>
  static constexpr V Do(T x, U y) {
    if (!std::is_constant_evaluated() && ClampedMulFastOp<T, U>::is_supported) {
      return ClampedMulFastOp<T, U>::template Do<V>(x, y);
    }
    V result = {};
    const V saturated =
        CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
    if (CheckedMulOp<T, U>::Do(x, y, &result)) [[likely]] {
      return result;
    }
    return saturated;
  }
};

template <typename T, typename U>
struct ClampedDivOp {};

template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct ClampedDivOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V = result_type>
  static constexpr V Do(T x, U y) {
    V result = {};
    if ((CheckedDivOp<T, U>::Do(x, y, &result))) [[likely]] {
      return result;
    }
    // Saturation goes to max, min, or NaN (if x is zero).
    return x ? CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y))
             : SaturationDefaultLimits<V>::NaN();
  }
};

template <typename T, typename U>
struct ClampedModOp {};

template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct ClampedModOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V = result_type>
  static constexpr V Do(T x, U y) {
    V result = {};
    if (CheckedModOp<T, U>::Do(x, y, &result)) [[likely]] {
      return result;
    }
    return x;
  }
};

template <typename T, typename U>
struct ClampedLshOp {};

// Left shift. Non-zero values saturate in the direction of the sign. A zero
// shifted by any value always results in zero.
template <typename T, typename U>
  requires(std::integral<T> && std::unsigned_integral<U>)
struct ClampedLshOp<T, U> {
  using result_type = T;
  template <typename V = result_type>
  static constexpr V Do(T x, U shift) {
    if (shift < std::numeric_limits<T>::digits) [[likely]] {
      // Shift as unsigned to avoid undefined behavior.
      V result = static_cast<V>(as_unsigned(x) << shift);
      // If the shift can be reversed, we know it was valid.
      if (result >> shift == x) [[likely]] {
        return result;
      }
    }
    return x ? CommonMaxOrMin<V>(IsValueNegative(x)) : 0;
  }
};

template <typename T, typename U>
struct ClampedRshOp {};

// Right shift. Negative values saturate to -1. Positive or 0 saturates to 0.
template <typename T, typename U>
  requires(std::integral<T> && std::unsigned_integral<U>)
struct ClampedRshOp<T, U> {
  using result_type = T;
  template <typename V = result_type>
  static constexpr V Do(T x, U shift) {
    // Signed right shift is odd, because it saturates to -1 or 0.
    const V saturated = as_unsigned(V(0)) - IsValueNegative(x);
    if (shift < kIntegerBitsPlusSign<T>) [[likely]] {
      return saturated_cast<V>(x >> shift);
    }
    return saturated;
  }
};

template <typename T, typename U>
struct ClampedAndOp {};

template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct ClampedAndOp<T, U> {
  using result_type = std::make_unsigned_t<MaxExponentPromotion<T, U>>;
  template <typename V>
  static constexpr V Do(T x, U y) {
    return static_cast<result_type>(x) & static_cast<result_type>(y);
  }
};

template <typename T, typename U>
struct ClampedOrOp {};

// For simplicity we promote to unsigned integers.
template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct ClampedOrOp<T, U> {
  using result_type = std::make_unsigned_t<MaxExponentPromotion<T, U>>;
  template <typename V>
  static constexpr V Do(T x, U y) {
    return static_cast<result_type>(x) | static_cast<result_type>(y);
  }
};

template <typename T, typename U>
struct ClampedXorOp {};

// For simplicity we support only unsigned integers.
template <typename T, typename U>
  requires(std::integral<T> && std::integral<U>)
struct ClampedXorOp<T, U> {
  using result_type = std::make_unsigned_t<MaxExponentPromotion<T, U>>;
  template <typename V>
  static constexpr V Do(T x, U y) {
    return static_cast<result_type>(x) ^ static_cast<result_type>(y);
  }
};

template <typename T, typename U>
struct ClampedMaxOp {};

template <typename T, typename U>
  requires(std::is_arithmetic_v<T> && std::is_arithmetic_v<U>)
struct ClampedMaxOp<T, U> {
  using result_type = MaxExponentPromotion<T, U>;
  template <typename V = result_type>
  static constexpr V Do(T x, U y) {
    return IsGreater<T, U>::Test(x, y) ? saturated_cast<V>(x)
                                       : saturated_cast<V>(y);
  }
};

template <typename T, typename U>
struct ClampedMinOp {};

template <typename T, typename U>
  requires(std::is_arithmetic_v<T> && std::is_arithmetic_v<U>)
|
||||
struct ClampedMinOp<T, U> {
|
||||
using result_type = LowestValuePromotion<T, U>;
|
||||
template <typename V = result_type>
|
||||
static constexpr V Do(T x, U y) {
|
||||
return IsLess<T, U>::Test(x, y) ? saturated_cast<V>(x)
|
||||
: saturated_cast<V>(y);
|
||||
}
|
||||
};
|
||||
|
||||
// This is just boilerplate that wraps the standard floating point arithmetic.
|
||||
// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
|
||||
#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
|
||||
template <typename T, typename U> \
|
||||
requires(std::floating_point<T> || std::floating_point<U>) \
|
||||
struct Clamped##NAME##Op<T, U> { \
|
||||
using result_type = MaxExponentPromotion<T, U>; \
|
||||
template <typename V = result_type> \
|
||||
static constexpr V Do(T x, U y) { \
|
||||
return saturated_cast<V>(x OP y); \
|
||||
} \
|
||||
};
|
||||
|
||||
BASE_FLOAT_ARITHMETIC_OPS(Add, +)
|
||||
BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
|
||||
BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
|
||||
BASE_FLOAT_ARITHMETIC_OPS(Div, /)
|
||||
|
||||
#undef BASE_FLOAT_ARITHMETIC_OPS
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8::base
|
||||
|
||||
#endif // V8_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
|
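
All the integer specializations above share one shape: try the fast op, try the checked op, and otherwise return the precomputed saturated bound. A minimal standalone sketch of that contract for 32-bit addition (illustrative only; `SketchClampedAdd` is a hypothetical name, and it uses a 64-bit intermediate instead of the internal CheckedAddOp machinery):

#include <cstdint>
#include <limits>

constexpr int32_t SketchClampedAdd(int32_t x, int32_t y) {
  // Widen, add, then clamp: the same observable behavior ClampedAddOp
  // produces via CommonMaxOrMin + CheckedAddOp.
  const int64_t wide = int64_t{x} + int64_t{y};
  if (wide > std::numeric_limits<int32_t>::max()) {
    return std::numeric_limits<int32_t>::max();
  }
  if (wide < std::numeric_limits<int32_t>::min()) {
    return std::numeric_limits<int32_t>::min();
  }
  return static_cast<int32_t>(wide);
}

static_assert(SketchClampedAdd(std::numeric_limits<int32_t>::max(), 1) ==
              std::numeric_limits<int32_t>::max());
static_assert(SketchClampedAdd(std::numeric_limits<int32_t>::min(), -1) ==
              std::numeric_limits<int32_t>::min());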
28
deps/v8/src/base/numerics/integral_constant_like.h
vendored
Normal file
@ -0,0 +1,28 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_INTEGRAL_CONSTANT_LIKE_H_
#define V8_BASE_NUMERICS_INTEGRAL_CONSTANT_LIKE_H_

#include <concepts>
#include <type_traits>

namespace v8::base {

// Exposition-only concept from [span.syn]
template <typename T>
concept IntegralConstantLike =
    std::is_integral_v<decltype(T::value)> &&
    !std::is_same_v<bool, std::remove_const_t<decltype(T::value)>> &&
    std::convertible_to<T, decltype(T::value)> &&
    std::equality_comparable_with<T, decltype(T::value)> &&
    std::bool_constant<T() == T::value>::value &&
    std::bool_constant<static_cast<decltype(T::value)>(T()) == T::value>::value;

}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_INTEGRAL_CONSTANT_LIKE_H_
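
As a quick illustration of what satisfies this concept (hedged sketch, not part of the patch): std::integral_constant passes, while bool-valued constants and plain arithmetic types do not.

#include <type_traits>
#include "src/base/numerics/integral_constant_like.h"

static_assert(v8::base::IntegralConstantLike<std::integral_constant<int, 3>>);
static_assert(
    !v8::base::IntegralConstantLike<std::integral_constant<bool, true>>);
static_assert(!v8::base::IntegralConstantLike<int>);  // no T::value member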
18
deps/v8/src/base/numerics/math_constants.h
vendored
Normal file
@ -0,0 +1,18 @@
// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_MATH_CONSTANTS_H_
#define V8_BASE_NUMERICS_MATH_CONSTANTS_H_

namespace v8::base {
// The mean acceleration due to gravity on Earth in m/s^2.
constexpr double kMeanGravityDouble = 9.80665;
constexpr float kMeanGravityFloat = 9.80665f;

}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_MATH_CONSTANTS_H_
41
deps/v8/src/base/numerics/ostream_operators.h
vendored
Normal file
@ -0,0 +1,41 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_OSTREAM_OPERATORS_H_
#define V8_BASE_NUMERICS_OSTREAM_OPERATORS_H_

#include <ostream>
#include <type_traits>

namespace v8::base {
namespace internal {

template <typename T>
  requires std::is_arithmetic_v<T>
class ClampedNumeric;
template <typename T>
  requires std::is_arithmetic_v<T>
class StrictNumeric;

// Overload the ostream output operator to make logging work nicely.
template <typename T>
std::ostream& operator<<(std::ostream& os, const StrictNumeric<T>& value) {
  os << static_cast<T>(value);
  return os;
}

// Overload the ostream output operator to make logging work nicely.
template <typename T>
std::ostream& operator<<(std::ostream& os, const ClampedNumeric<T>& value) {
  os << static_cast<T>(value);
  return os;
}

}  // namespace internal
}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_OSTREAM_OPERATORS_H_
24
deps/v8/src/base/numerics/ranges.h
vendored
Normal file
@ -0,0 +1,24 @@
// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_RANGES_H_
#define V8_BASE_NUMERICS_RANGES_H_

#include <cmath>
#include <type_traits>

namespace v8::base {

template <typename T>
constexpr bool IsApproximatelyEqual(T lhs, T rhs, T tolerance) {
  static_assert(std::is_arithmetic_v<T>, "Argument must be arithmetic");
  return std::abs(rhs - lhs) <= tolerance;
}

}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_RANGES_H_
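
A usage sketch for the helper above (illustrative, not part of the patch; the tolerance is a plain absolute difference, so choose it per use case):

#include <cassert>
#include "src/base/numerics/ranges.h"

int main() {
  assert(v8::base::IsApproximatelyEqual(1.0, 1.05, 0.1));   // |diff| <= tol
  assert(!v8::base::IsApproximatelyEqual(1.0, 1.25, 0.1));  // |diff| > tol
}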
@ -1,17 +1,12 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2014 the V8 project authors. All rights reserved.
// List of adaptations:
// - include guard names
// - wrap in v8 namespace
// - formatting (git cl format)
// - include paths
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_SAFE_CONVERSIONS_H_
#define V8_BASE_SAFE_CONVERSIONS_H_
#ifndef V8_BASE_NUMERICS_SAFE_CONVERSIONS_H_
#define V8_BASE_NUMERICS_SAFE_CONVERSIONS_H_

#include <stddef.h>

@ -20,10 +15,10 @@
#include <limits>
#include <type_traits>

#include "src/base/safe_conversions_impl.h"
#include "src/base/numerics/safe_conversions_impl.h"  // IWYU pragma: export

#if defined(__ARMEL__) && !defined(__native_client__)
#include "src/base/safe_conversions_arm_impl.h"
#include "src/base/numerics/safe_conversions_arm_impl.h"  // IWYU pragma: export
#define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (1)
#else
#define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (0)

@ -49,7 +44,7 @@ struct SaturateFastAsmOp {
template <typename Dst, typename Src>
struct IsValueInRangeFastOp {
  static constexpr bool is_supported = false;
  static constexpr bool Do(Src value) {
  static constexpr bool Do(Src) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<bool>();
  }

@ -58,7 +53,7 @@ struct IsValueInRangeFastOp {
// Signed to signed range comparison.
template <typename Dst, typename Src>
  requires(std::signed_integral<Dst> && std::signed_integral<Src> &&
           !IsTypeInRangeForNumericType<Dst, Src>::value)
           !kIsTypeInRangeForNumericType<Dst, Src>)
struct IsValueInRangeFastOp<Dst, Src> {
  static constexpr bool is_supported = true;

@ -72,27 +67,29 @@ struct IsValueInRangeFastOp<Dst, Src> {
// Signed to unsigned range comparison.
template <typename Dst, typename Src>
  requires(std::unsigned_integral<Dst> && std::signed_integral<Src> &&
           !IsTypeInRangeForNumericType<Dst, Src>::value)
           !kIsTypeInRangeForNumericType<Dst, Src>)
struct IsValueInRangeFastOp<Dst, Src> {
  static constexpr bool is_supported = true;

  static constexpr bool Do(Src value) {
    // We cast a signed as unsigned to overflow negative values to the top,
    // then compare against whichever maximum is smaller, as our upper bound.
    return as_unsigned(value) <= as_unsigned(CommonMax<Src, Dst>());
    return as_unsigned(value) <= as_unsigned(kCommonMax<Src, Dst>);
  }
};

// Convenience function that returns true if the supplied value is in range
// for the destination type.
template <typename Dst, typename Src>
  requires(IsNumeric<Src> && std::is_arithmetic_v<Dst> &&
           std::numeric_limits<Dst>::lowest() < std::numeric_limits<Dst>::max())
constexpr bool IsValueInRangeForNumericType(Src value) {
  using SrcType = typename internal::UnderlyingType<Src>::type;
  using SrcType = UnderlyingType<Src>;
  const auto underlying_value = static_cast<SrcType>(value);
  return internal::IsValueInRangeFastOp<Dst, SrcType>::is_supported
             ? internal::IsValueInRangeFastOp<Dst, SrcType>::Do(
                   static_cast<SrcType>(value))
             : internal::DstRangeRelationToSrcRange<Dst>(
                   static_cast<SrcType>(value))
                   underlying_value)
             : internal::DstRangeRelationToSrcRange<Dst>(underlying_value)
                   .IsValid();
}

@ -101,12 +98,13 @@ constexpr bool IsValueInRangeForNumericType(Src value) {
// overflow or underflow. NaN source will always trigger a CHECK.
template <typename Dst, class CheckHandler = internal::CheckOnFailure,
          typename Src>
  requires(IsNumeric<Src> && std::is_arithmetic_v<Dst> &&
           std::numeric_limits<Dst>::lowest() < std::numeric_limits<Dst>::max())
constexpr Dst checked_cast(Src value) {
  // This throws a compile-time error on evaluating the constexpr if it can be
  // determined at compile-time as failing, otherwise it will CHECK at runtime.
  using SrcType = typename internal::UnderlyingType<Src>::type;
  if (IsValueInRangeForNumericType<Dst>(value)) [[likely]] {
    return static_cast<Dst>(static_cast<SrcType>(value));
    return static_cast<Dst>(static_cast<UnderlyingType<Src>>(value));
  }
  return CheckHandler::template HandleFailure<Dst>();
}

@ -160,7 +158,7 @@ constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint) {
template <typename Dst, typename Src>
struct SaturateFastOp {
  static constexpr bool is_supported = false;
  static constexpr Dst Do(Src value) {
  static constexpr Dst Do(Src) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<Dst>();
  }

@ -186,8 +184,8 @@ struct SaturateFastOp<Dst, Src> {
  // optimization heuristics across compilers. Do not change without
  // checking the emitted code.
  const Dst saturated = CommonMaxOrMin<Dst, Src>(
      IsMaxInRangeForNumericType<Dst, Src>() ||
      (!IsMinInRangeForNumericType<Dst, Src>() && IsValueNegative(value)));
      kIsMaxInRangeForNumericType<Dst, Src> ||
      (!kIsMinInRangeForNumericType<Dst, Src> && IsValueNegative(value)));
  if (IsValueInRangeForNumericType<Dst>(value)) [[likely]] {
    return static_cast<Dst>(value);
  }

@ -203,54 +201,47 @@ template <typename Dst,
          template <typename> class SaturationHandler = SaturationDefaultLimits,
          typename Src>
constexpr Dst saturated_cast(Src value) {
  using SrcType = typename UnderlyingType<Src>::type;
  return !IsConstantEvaluated() && SaturateFastOp<Dst, SrcType>::is_supported &&
  using SrcType = UnderlyingType<Src>;
  const auto underlying_value = static_cast<SrcType>(value);
  return !std::is_constant_evaluated() &&
                 SaturateFastOp<Dst, SrcType>::is_supported &&
                 std::is_same_v<SaturationHandler<Dst>,
                                SaturationDefaultLimits<Dst>>
             ? SaturateFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
             ? SaturateFastOp<Dst, SrcType>::Do(underlying_value)
             : saturated_cast_impl<Dst, SaturationHandler, SrcType>(
                   static_cast<SrcType>(value),
                   underlying_value,
                   DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(
                       static_cast<SrcType>(value)));
                       underlying_value));
}

// strict_cast<> is analogous to static_cast<> for numeric types, except that
// it will cause a compile failure if the destination type is not large enough
// to contain any value in the source type. It performs no runtime checking.
template <typename Dst, typename Src>
template <typename Dst, typename Src, typename SrcType = UnderlyingType<Src>>
  requires(
      IsNumeric<Src> && std::is_arithmetic_v<Dst> &&
      // If you got here from a compiler error, it's because you tried to assign
      // from a source type to a destination type that has insufficient range.
      // The solution may be to change the destination type you're assigning to,
      // and use one large enough to represent the source.
      // Alternatively, you may be better served with the checked_cast<> or
      // saturated_cast<> template functions for your particular use case.
      kStaticDstRangeRelationToSrcRange<Dst, SrcType> ==
          NumericRangeRepresentation::kContained)
constexpr Dst strict_cast(Src value) {
  using SrcType = typename UnderlyingType<Src>::type;
  static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
  static_assert(std::is_arithmetic_v<Dst>, "Result must be numeric.");

  // If you got here from a compiler error, it's because you tried to assign
  // from a source type to a destination type that has insufficient range.
  // The solution may be to change the destination type you're assigning to,
  // and use one large enough to represent the source.
  // Alternatively, you may be better served with the checked_cast<> or
  // saturated_cast<> template functions for your particular use case.
  static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value ==
                    NUMERIC_RANGE_CONTAINED,
                "The source type is out of range for the destination type. "
                "Please see strict_cast<> comments for more information.");

  return static_cast<Dst>(static_cast<SrcType>(value));
}

// Some wrappers to statically check that a type is in range.
template <typename Dst, typename Src>
struct IsNumericRangeContained {
  static constexpr bool value = false;
};
inline constexpr bool kIsNumericRangeContained = false;

template <typename Dst, typename Src>
  requires(ArithmeticOrUnderlyingEnum<Dst>::value &&
           ArithmeticOrUnderlyingEnum<Src>::value)
struct IsNumericRangeContained<Dst, Src> {
  static constexpr bool value =
      StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
      NUMERIC_RANGE_CONTAINED;
};
  requires(std::is_arithmetic_v<ArithmeticOrUnderlyingEnum<Dst>> &&
           std::is_arithmetic_v<ArithmeticOrUnderlyingEnum<Src>>)
inline constexpr bool kIsNumericRangeContained<Dst, Src> =
    kStaticDstRangeRelationToSrcRange<Dst, Src> ==
    NumericRangeRepresentation::kContained;

// StrictNumeric implements compile time range checking between numeric types by
// wrapping assignment operations in a strict_cast. This class is intended to be

@ -263,6 +254,7 @@ struct IsNumericRangeContained<Dst, Src> {
// runtime checking of any of the associated mathematical operations. Use
// CheckedNumeric for runtime range checks of the actual value being assigned.
template <typename T>
  requires std::is_arithmetic_v<T>
class StrictNumeric {
 public:
  using type = T;

@ -274,12 +266,6 @@ class StrictNumeric {
  constexpr StrictNumeric(const StrictNumeric<Src>& rhs)
      : value_(strict_cast<T>(rhs.value_)) {}

  // Strictly speaking, this is not necessary, but declaring this allows class
  // template argument deduction to be used so that it is possible to simply
  // write `StrictNumeric(777)` instead of `StrictNumeric<int>(777)`.
  // NOLINTNEXTLINE(runtime/explicit)
  constexpr StrictNumeric(T value) : value_(value) {}

  // This is not an explicit constructor because we implicitly upgrade regular
  // numerics to StrictNumerics to make them easier to use.
  template <typename Src>

@ -299,32 +285,37 @@ class StrictNumeric {
  // If none of that works, you may be better served with the checked_cast<> or
  // saturated_cast<> template functions for your particular use case.
  template <typename Dst>
    requires(IsNumericRangeContained<Dst, T>::value)
  constexpr operator Dst() const {
    return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
    requires(kIsNumericRangeContained<Dst, T>)
  constexpr operator Dst() const {  // NOLINT(runtime/explicit)
    return static_cast<ArithmeticOrUnderlyingEnum<Dst>>(value_);
  }

  // Unary negation does not require any conversions.
  constexpr bool operator!() const { return !value_; }

 private:
  template <typename>
  template <typename U>
    requires std::is_arithmetic_v<U>
  friend class StrictNumeric;

  T value_;
};

template <typename T>
StrictNumeric(T) -> StrictNumeric<T>;

// Convenience wrapper returns a StrictNumeric from the provided arithmetic
// type.
template <typename T>
constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
    const T value) {
constexpr StrictNumeric<UnderlyingType<T>> MakeStrictNum(const T value) {
  return value;
}

#define BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP)          \
  template <typename L, typename R>                                 \
    requires(internal::Is##CLASS##Op<L, R>::value)                  \
  constexpr bool operator OP(const L lhs, const R rhs) {            \
    return SafeCompare<NAME, typename UnderlyingType<L>::type,      \
                       typename UnderlyingType<R>::type>(lhs, rhs); \
#define BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP)                    \
  template <typename L, typename R>                                           \
    requires(internal::Is##CLASS##Op<L, R>)                                   \
  constexpr bool operator OP(L lhs, R rhs) {                                  \
    return SafeCompare<NAME, UnderlyingType<L>, UnderlyingType<R>>(lhs, rhs); \
  }

BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLess, <)

@ -339,9 +330,9 @@ BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsNotEqual, !=)
using internal::as_signed;
using internal::as_unsigned;
using internal::checked_cast;
using internal::IsTypeInRangeForNumericType;
using internal::IsValueInRangeForNumericType;
using internal::IsValueNegative;
using internal::kIsTypeInRangeForNumericType;
using internal::MakeStrictNum;
using internal::SafeUnsignedAbs;
using internal::saturated_cast;

@ -395,4 +386,4 @@ Dst ClampRound(Src value) {

}  // namespace v8::base

#endif  // V8_BASE_SAFE_CONVERSIONS_H_
#endif  // V8_BASE_NUMERICS_SAFE_CONVERSIONS_H_
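
Taken together, the header offers three cast flavors with different failure modes. A hedged usage sketch (`CastSketch` is a hypothetical name; checked_cast and saturated_cast appear in the using-declarations above, while strict_cast is qualified through internal:: here because its export falls outside the hunks shown):

#include <cstdint>
#include "src/base/numerics/safe_conversions.h"

void CastSketch() {
  int big = 300;
  // Clamps out-of-range values to the destination's limits: a == 255.
  uint8_t a = v8::base::saturated_cast<uint8_t>(big);
  // CHECK-fails at runtime if the value doesn't fit: fine for 200.
  uint8_t b = v8::base::checked_cast<uint8_t>(200);
  // Refuses to compile unless *every* int fits in the destination.
  int64_t c = v8::base::internal::strict_cast<int64_t>(big);
  // v8::base::internal::strict_cast<uint8_t>(big) would not compile.
  (void)a; (void)b; (void)c;
}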
56
deps/v8/src/base/numerics/safe_conversions_arm_impl.h
vendored
Normal file
@ -0,0 +1,56 @@
// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
#define V8_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_

// IWYU pragma: private, include "src/base/numerics/safe_conversions.h"

#include <stdint.h>

#include <algorithm>
#include <concepts>
#include <type_traits>

#include "src/base/numerics/safe_conversions_impl.h"

namespace v8::base {
namespace internal {

// Fast saturation to a destination type.
template <typename Dst, typename Src>
struct SaturateFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && std::signed_integral<Src> && std::integral<Dst> &&
      kIntegerBitsPlusSign<Src> <= kIntegerBitsPlusSign<int32_t> &&
      kIntegerBitsPlusSign<Dst> <= kIntegerBitsPlusSign<int32_t> &&
      !kIsTypeInRangeForNumericType<Dst, Src>;

  __attribute__((always_inline)) static Dst Do(Src value) {
    int32_t src = value;
    if constexpr (std::is_signed_v<Dst>) {
      int32_t result;
      asm("ssat %[dst], %[shift], %[src]"
          : [dst] "=r"(result)
          : [src] "r"(src),
            [shift] "n"(std::min(kIntegerBitsPlusSign<Dst>, 32)));
      return static_cast<Dst>(result);
    } else {
      uint32_t result;
      asm("usat %[dst], %[shift], %[src]"
          : [dst] "=r"(result)
          : [src] "r"(src),
            [shift] "n"(std::min(kIntegerBitsPlusSign<Dst>, 31)));
      return static_cast<Dst>(result);
    }
  }
};

}  // namespace internal
}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
697
deps/v8/src/base/numerics/safe_conversions_impl.h
vendored
Normal file
@ -0,0 +1,697 @@
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
#define V8_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_

// IWYU pragma: private, include "src/base/numerics/safe_conversions.h"

#include <stddef.h>
#include <stdint.h>

#include <concepts>
#include <limits>
#include <type_traits>
#include <utility>

#include "src/base/numerics/integral_constant_like.h"

namespace v8::base::internal {

// The std library doesn't provide a binary max_exponent for integers, however
// we can compute an analog using std::numeric_limits<>::digits.
template <typename NumericType>
inline constexpr int kMaxExponent =
    std::is_floating_point_v<NumericType>
        ? std::numeric_limits<NumericType>::max_exponent
        : std::numeric_limits<NumericType>::digits + 1;

// The number of bits (including the sign) in an integer. Eliminates sizeof
// hacks.
template <typename NumericType>
inline constexpr int kIntegerBitsPlusSign =
    std::numeric_limits<NumericType>::digits + std::is_signed_v<NumericType>;

// Determines if a numeric value is negative without throwing compiler
// warnings on: unsigned(value) < 0.
template <typename T>
  requires(std::is_arithmetic_v<T>)
constexpr bool IsValueNegative(T value) {
  if constexpr (std::is_signed_v<T>) {
    return value < 0;
  } else {
    return false;
  }
}

// This performs a fast negation, returning a signed value. It works on unsigned
// arguments, but probably doesn't do what you want for any unsigned value
// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
template <typename T>
  requires std::is_integral_v<T>
constexpr auto ConditionalNegate(T x, bool is_negative) {
  using SignedT = std::make_signed_t<T>;
  using UnsignedT = std::make_unsigned_t<T>;
  return static_cast<SignedT>((static_cast<UnsignedT>(x) ^
                               static_cast<UnsignedT>(-SignedT(is_negative))) +
                              is_negative);
}

// This performs a safe, absolute value via unsigned overflow.
template <typename T>
  requires std::is_integral_v<T>
constexpr auto SafeUnsignedAbs(T value) {
  using UnsignedT = std::make_unsigned_t<T>;
  return IsValueNegative(value)
             ? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
             : static_cast<UnsignedT>(value);
}
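
// Illustrative sanity checks (added for this writeup, not part of the patch;
// they would hold if compiled at this point): ConditionalNegate negates only
// when asked, and SafeUnsignedAbs is exact even for the most-negative int.
static_assert(ConditionalNegate(5, true) == -5);
static_assert(ConditionalNegate(5, false) == 5);
static_assert(SafeUnsignedAbs(-2147483647 - 1) == 2147483648u);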

// TODO(jschuh): Debug builds don't reliably propagate constants, so we restrict
// some accelerated runtime paths to release builds until this can be forced
// with consteval support in C++20 or C++23.
#if defined(NDEBUG)
inline constexpr bool kEnableAsmCode = true;
#else
inline constexpr bool kEnableAsmCode = false;
#endif

// Forces a crash, like a NOTREACHED(). Used for numeric boundary errors.
// Also used in a constexpr template to trigger a compilation failure on
// an error condition.
struct CheckOnFailure {
  template <typename T>
  static T HandleFailure() {
#if defined(_MSC_VER)
    __debugbreak();
#elif defined(__GNUC__) || defined(__clang__)
    __builtin_trap();
#else
    ((void)(*(volatile char*)0 = 0));
#endif
    return T();
  }
};

enum class IntegerRepresentation { kUnsigned, kSigned };

// A range for a given numeric Src type is contained for a given numeric Dst
// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
// We implement this as template specializations rather than simple static
// comparisons to ensure type correctness in our comparisons.
enum class NumericRangeRepresentation { kNotContained, kContained };

// Helper templates to statically determine if our destination type can contain
// maximum and minimum values represented by the source type.

// Default case, used for same sign: Dst is guaranteed to contain Src only if
// its range is equal or larger.
template <typename Dst, typename Src,
          IntegerRepresentation DstSign =
              std::is_signed_v<Dst> ? IntegerRepresentation::kSigned
                                    : IntegerRepresentation::kUnsigned,
          IntegerRepresentation SrcSign =
              std::is_signed_v<Src> ? IntegerRepresentation::kSigned
                                    : IntegerRepresentation::kUnsigned>
inline constexpr auto kStaticDstRangeRelationToSrcRange =
    kMaxExponent<Dst> >= kMaxExponent<Src>
        ? NumericRangeRepresentation::kContained
        : NumericRangeRepresentation::kNotContained;

// Unsigned to signed: Dst is guaranteed to contain source only if its range is
// larger.
template <typename Dst, typename Src>
inline constexpr auto
    kStaticDstRangeRelationToSrcRange<Dst, Src, IntegerRepresentation::kSigned,
                                      IntegerRepresentation::kUnsigned> =
        kMaxExponent<Dst> > kMaxExponent<Src>
            ? NumericRangeRepresentation::kContained
            : NumericRangeRepresentation::kNotContained;

// Signed to unsigned: Dst cannot be statically determined to contain Src.
template <typename Dst, typename Src>
inline constexpr auto kStaticDstRangeRelationToSrcRange<
    Dst, Src, IntegerRepresentation::kUnsigned,
    IntegerRepresentation::kSigned> = NumericRangeRepresentation::kNotContained;

// This class wraps the range constraints as separate booleans so the compiler
// can identify constants and eliminate unused code paths.
class RangeCheck {
 public:
  constexpr RangeCheck() = default;
  constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
      : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}

  constexpr bool operator==(const RangeCheck& rhs) const = default;

  constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
  constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
  constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
  constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
  constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
  constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }

 private:
  // Do not change the order of these member variables. The integral conversion
  // optimization depends on this exact order.
  const bool is_underflow_ = false;
  const bool is_overflow_ = false;
};

// The following helper template addresses a corner case in range checks for
// conversion from a floating-point type to an integral type of smaller range
// but larger precision (e.g. float -> unsigned). The problem is as follows:
//   1. Integral maximum is always one less than a power of two, so it must be
//      truncated to fit the mantissa of the floating point. The direction of
//      rounding is implementation defined, but by default it's always IEEE
//      floats, which round to nearest and thus result in a value of larger
//      magnitude than the integral value.
//      Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
//                                   // is 4294967295u.
//   2. If the floating point value is equal to the promoted integral maximum
//      value, a range check will erroneously pass.
//      Example: (4294967296f <= 4294967295u) // This is true due to a precision
//                                            // loss in rounding up to float.
//   3. When the floating point value is then converted to an integral, the
//      resulting value is out of range for the target integral type and
//      thus is implementation defined.
//      Example: unsigned u = (float)INT_MAX; // u will typically overflow to 0.
// To fix this bug we manually truncate the maximum value when the destination
// type is an integral of larger precision than the source floating-point type,
// such that the resulting maximum is represented exactly as a floating point.
template <typename Dst, typename Src, template <typename> class Bounds>
struct NarrowingRange {
  using SrcLimits = std::numeric_limits<Src>;
  using DstLimits = std::numeric_limits<Dst>;

  // Computes the mask required to make an accurate comparison between types.
  static constexpr int kShift = (kMaxExponent<Src> > kMaxExponent<Dst> &&
                                 SrcLimits::digits < DstLimits::digits)
                                    ? (DstLimits::digits - SrcLimits::digits)
                                    : 0;

  template <typename T>
    requires(std::same_as<T, Dst> &&
             ((std::integral<T> && kShift < DstLimits::digits) ||
              (std::floating_point<T> && kShift == 0)))
  // Masks out the integer bits that are beyond the precision of the
  // intermediate type used for comparison.
  static constexpr T Adjust(T value) {
    if constexpr (std::integral<T>) {
      using UnsignedDst = typename std::make_unsigned_t<T>;
      return static_cast<T>(
          ConditionalNegate(SafeUnsignedAbs(value) &
                                ~((UnsignedDst{1} << kShift) - UnsignedDst{1}),
                            IsValueNegative(value)));
    } else {
      return value;
    }
  }

  static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
  static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
};
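
// Worked example of the hazard above (added for this writeup, not from the
// patch): float cannot represent UINT32_MAX, so the naive check passes
// spuriously, while the masked limit is exactly representable.
static_assert(4294967296.0f <= static_cast<float>(4294967295u));
// For float -> uint32_t, kShift is 8, so the masked maximum is 4294967040.
static_assert(NarrowingRange<uint32_t, float, std::numeric_limits>::max() ==
              4294967040u);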

// The following templates are for ranges that must be verified at runtime. We
// split it into checks based on signedness to avoid confusing casts and
// compiler warnings on signed and unsigned comparisons.

// Default case, used for same sign narrowing: The range is contained for normal
// limits.
template <typename Dst, typename Src, template <typename> class Bounds,
          IntegerRepresentation DstSign =
              std::is_signed_v<Dst> ? IntegerRepresentation::kSigned
                                    : IntegerRepresentation::kUnsigned,
          IntegerRepresentation SrcSign =
              std::is_signed_v<Src> ? IntegerRepresentation::kSigned
                                    : IntegerRepresentation::kUnsigned,
          NumericRangeRepresentation DstRange =
              kStaticDstRangeRelationToSrcRange<Dst, Src>>
struct DstRangeRelationToSrcRangeImpl {
  static constexpr RangeCheck Check(Src value) {
    using SrcLimits = std::numeric_limits<Src>;
    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
    return RangeCheck(
        static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
            static_cast<Dst>(value) >= DstLimits::lowest(),
        static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
            static_cast<Dst>(value) <= DstLimits::max());
  }
};

// Signed to signed narrowing: Both the upper and lower boundaries may be
// exceeded for standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<
    Dst, Src, Bounds, IntegerRepresentation::kSigned,
    IntegerRepresentation::kSigned, NumericRangeRepresentation::kNotContained> {
  static constexpr RangeCheck Check(Src value) {
    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
    return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
  }
};

// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
// standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<
    Dst, Src, Bounds, IntegerRepresentation::kUnsigned,
    IntegerRepresentation::kUnsigned,
    NumericRangeRepresentation::kNotContained> {
  static constexpr RangeCheck Check(Src value) {
    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
    return RangeCheck(
        DstLimits::lowest() == Dst{0} || value >= DstLimits::lowest(),
        value <= DstLimits::max());
  }
};

// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<
    Dst, Src, Bounds, IntegerRepresentation::kSigned,
    IntegerRepresentation::kUnsigned,
    NumericRangeRepresentation::kNotContained> {
  static constexpr RangeCheck Check(Src value) {
    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
    using Promotion = decltype(Src() + Dst());
    return RangeCheck(DstLimits::lowest() <= Dst{0} ||
                          static_cast<Promotion>(value) >=
                              static_cast<Promotion>(DstLimits::lowest()),
                      static_cast<Promotion>(value) <=
                          static_cast<Promotion>(DstLimits::max()));
  }
};

// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
// and any negative value exceeds the lower boundary for standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<
    Dst, Src, Bounds, IntegerRepresentation::kUnsigned,
    IntegerRepresentation::kSigned, NumericRangeRepresentation::kNotContained> {
  static constexpr RangeCheck Check(Src value) {
    using SrcLimits = std::numeric_limits<Src>;
    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
    using Promotion = decltype(Src() + Dst());
    bool ge_zero;
    // Converting floating-point to integer will discard fractional part, so
    // values in (-1.0, -0.0) will truncate to 0 and fit in Dst.
    if constexpr (std::is_floating_point_v<Src>) {
      ge_zero = value > Src{-1};
    } else {
      ge_zero = value >= Src{0};
    }
    return RangeCheck(
        ge_zero && (DstLimits::lowest() == 0 ||
                    static_cast<Dst>(value) >= DstLimits::lowest()),
        static_cast<Promotion>(SrcLimits::max()) <=
                static_cast<Promotion>(DstLimits::max()) ||
            static_cast<Promotion>(value) <=
                static_cast<Promotion>(DstLimits::max()));
  }
};

// Simple wrapper for statically checking if a type's range is contained.
template <typename Dst, typename Src>
inline constexpr bool kIsTypeInRangeForNumericType =
    kStaticDstRangeRelationToSrcRange<Dst, Src> ==
    NumericRangeRepresentation::kContained;
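
// Spot checks (added for this writeup, not from the patch):
static_assert(kIsTypeInRangeForNumericType<int64_t, int32_t>);
static_assert(!kIsTypeInRangeForNumericType<uint32_t, int32_t>);  // sign lost
static_assert(!kIsTypeInRangeForNumericType<int32_t, uint32_t>);  // max too big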

template <typename Dst, template <typename> class Bounds = std::numeric_limits,
          typename Src>
  requires(std::is_arithmetic_v<Src> && std::is_arithmetic_v<Dst> &&
           Bounds<Dst>::lowest() < Bounds<Dst>::max())
constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
  return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
}

// Integer promotion templates used by the portable checked integer arithmetic.
template <size_t Size, bool IsSigned>
struct IntegerForDigitsAndSignImpl;

#define INTEGER_FOR_DIGITS_AND_SIGN(I)                        \
  template <>                                                 \
  struct IntegerForDigitsAndSignImpl<kIntegerBitsPlusSign<I>, \
                                     std::is_signed_v<I>> {   \
    using type = I;                                           \
  }

INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
#undef INTEGER_FOR_DIGITS_AND_SIGN

template <size_t Size, bool IsSigned>
using IntegerForDigitsAndSign =
    IntegerForDigitsAndSignImpl<Size, IsSigned>::type;

// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
// support 128-bit math, then the ArithmeticPromotion template below will need
// to be updated (or more likely replaced with a decltype expression).
static_assert(kIntegerBitsPlusSign<intmax_t> == 64,
              "Max integer size not supported for this toolchain.");

template <typename Integer, bool IsSigned = std::is_signed_v<Integer>>
using TwiceWiderInteger =
    IntegerForDigitsAndSign<kIntegerBitsPlusSign<Integer> * 2, IsSigned>;

// Determines the type that can represent the largest positive value.
template <typename Lhs, typename Rhs>
using MaxExponentPromotion =
    std::conditional_t<(kMaxExponent<Lhs> > kMaxExponent<Rhs>), Lhs, Rhs>;
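
// Spot checks of the promotion machinery (added for this writeup, not from
// the patch):
static_assert(std::is_same_v<MaxExponentPromotion<int8_t, uint32_t>, uint32_t>);
static_assert(std::is_same_v<TwiceWiderInteger<int16_t>, int32_t>);
static_assert(std::is_same_v<TwiceWiderInteger<uint32_t>, uint64_t>);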

// Determines the type that can represent the lowest arithmetic value.
template <typename Lhs, typename Rhs>
using LowestValuePromotion = std::conditional_t<
    std::is_signed_v<Lhs>
        ? (!std::is_signed_v<Rhs> || kMaxExponent<Lhs> > kMaxExponent<Rhs>)
        : (!std::is_signed_v<Rhs> && kMaxExponent<Lhs> < kMaxExponent<Rhs>),
    Lhs, Rhs>;

// Determines the type that is best able to represent an arithmetic result.

// Default case, used when the side with the max exponent is big enough.
template <typename Lhs, typename Rhs = Lhs,
          bool is_intmax_type =
              std::is_integral_v<MaxExponentPromotion<Lhs, Rhs>> &&
              kIntegerBitsPlusSign<MaxExponentPromotion<Lhs, Rhs>> ==
                  kIntegerBitsPlusSign<intmax_t>,
          bool is_max_exponent = kStaticDstRangeRelationToSrcRange<
                                     MaxExponentPromotion<Lhs, Rhs>, Lhs> ==
                                     NumericRangeRepresentation::kContained &&
                                 kStaticDstRangeRelationToSrcRange<
                                     MaxExponentPromotion<Lhs, Rhs>, Rhs> ==
                                     NumericRangeRepresentation::kContained>
struct BigEnoughPromotionImpl {
  using type = MaxExponentPromotion<Lhs, Rhs>;
  static constexpr bool kContained = true;
};

// We can use a twice wider type to fit.
template <typename Lhs, typename Rhs>
struct BigEnoughPromotionImpl<Lhs, Rhs, false, false> {
  using type =
      TwiceWiderInteger<MaxExponentPromotion<Lhs, Rhs>,
                        std::is_signed_v<Lhs> || std::is_signed_v<Rhs>>;
  static constexpr bool kContained = true;
};

// No type is large enough.
template <typename Lhs, typename Rhs>
struct BigEnoughPromotionImpl<Lhs, Rhs, true, false> {
  using type = MaxExponentPromotion<Lhs, Rhs>;
  static constexpr bool kContained = false;
};

template <typename Lhs, typename Rhs>
using BigEnoughPromotion = BigEnoughPromotionImpl<Lhs, Rhs>::type;

template <typename Lhs, typename Rhs>
inline constexpr bool kIsBigEnoughPromotionContained =
    BigEnoughPromotionImpl<Lhs, Rhs>::kContained;

// We can statically check if operations on the provided types can wrap, so we
// can skip the checked operations if they're not needed. So, for an integer we
// care if the destination type preserves the sign and is twice the width of
// the source.
template <typename T, typename Lhs, typename Rhs = Lhs>
inline constexpr bool kIsIntegerArithmeticSafe =
    !std::is_floating_point_v<T> && !std::is_floating_point_v<Lhs> &&
    !std::is_floating_point_v<Rhs> &&
    std::is_signed_v<T> >= std::is_signed_v<Lhs> &&
    kIntegerBitsPlusSign<T> >= (2 * kIntegerBitsPlusSign<Lhs>) &&
    std::is_signed_v<T> >= std::is_signed_v<Rhs> &&
    kIntegerBitsPlusSign<T> >= (2 * kIntegerBitsPlusSign<Rhs>);

// Promotes to a type that can represent any possible result of a binary
// arithmetic operation with the source types.
template <typename Lhs, typename Rhs>
struct FastIntegerArithmeticPromotionImpl {
  using type = BigEnoughPromotion<Lhs, Rhs>;
  static constexpr bool kContained = false;
};

template <typename Lhs, typename Rhs>
  requires(kIsIntegerArithmeticSafe<
           std::conditional_t<std::is_signed_v<Lhs> || std::is_signed_v<Rhs>,
                              intmax_t, uintmax_t>,
           MaxExponentPromotion<Lhs, Rhs>>)
struct FastIntegerArithmeticPromotionImpl<Lhs, Rhs> {
  using type =
      TwiceWiderInteger<MaxExponentPromotion<Lhs, Rhs>,
                        std::is_signed_v<Lhs> || std::is_signed_v<Rhs>>;
  static_assert(kIsIntegerArithmeticSafe<type, Lhs, Rhs>);
  static constexpr bool kContained = true;
};

template <typename Lhs, typename Rhs>
using FastIntegerArithmeticPromotion =
    FastIntegerArithmeticPromotionImpl<Lhs, Rhs>::type;

template <typename Lhs, typename Rhs>
inline constexpr bool kIsFastIntegerArithmeticPromotionContained =
    FastIntegerArithmeticPromotionImpl<Lhs, Rhs>::kContained;

template <typename T>
struct ArithmeticOrIntegralConstant {
  using type = T;
};

template <typename T>
  requires IntegralConstantLike<T>
struct ArithmeticOrIntegralConstant<T> {
  using type = T::value_type;
};

// Extracts the underlying type from an enum.
template <typename T>
using ArithmeticOrUnderlyingEnum =
    typename std::conditional_t<std::is_enum_v<T>, std::underlying_type<T>,
                                ArithmeticOrIntegralConstant<T>>::type;

// The following are helper templates used in the CheckedNumeric class.
template <typename T>
  requires std::is_arithmetic_v<T>
class CheckedNumeric;

template <typename T>
  requires std::is_arithmetic_v<T>
class ClampedNumeric;

template <typename T>
  requires std::is_arithmetic_v<T>
class StrictNumeric;

// Used to treat CheckedNumeric and arithmetic underlying types the same.
template <typename T>
inline constexpr bool kIsCheckedNumeric = false;
template <typename T>
inline constexpr bool kIsCheckedNumeric<CheckedNumeric<T>> = true;
template <typename T>
concept IsCheckedNumeric = kIsCheckedNumeric<T>;

template <typename T>
inline constexpr bool kIsClampedNumeric = false;
template <typename T>
inline constexpr bool kIsClampedNumeric<ClampedNumeric<T>> = true;
template <typename T>
concept IsClampedNumeric = kIsClampedNumeric<T>;

template <typename T>
inline constexpr bool kIsStrictNumeric = false;
template <typename T>
inline constexpr bool kIsStrictNumeric<StrictNumeric<T>> = true;
template <typename T>
concept IsStrictNumeric = kIsStrictNumeric<T>;

template <typename T>
struct UnderlyingTypeImpl {
  using type = ArithmeticOrUnderlyingEnum<T>;
};
template <typename T>
struct UnderlyingTypeImpl<CheckedNumeric<T>> {
  using type = T;
};
template <typename T>
struct UnderlyingTypeImpl<ClampedNumeric<T>> {
  using type = T;
};
template <typename T>
struct UnderlyingTypeImpl<StrictNumeric<T>> {
  using type = T;
};
template <typename T>
using UnderlyingType = UnderlyingTypeImpl<T>::type;

template <typename T>
inline constexpr bool kIsNumeric = std::is_arithmetic_v<UnderlyingType<T>>;
template <typename T>
  requires(IsCheckedNumeric<T> || IsClampedNumeric<T> || IsStrictNumeric<T>)
inline constexpr bool kIsNumeric<T> = true;
template <typename T>
concept IsNumeric = kIsNumeric<T>;

template <typename L, typename R>
concept IsCheckedOp = (IsCheckedNumeric<L> && IsNumeric<R>) ||
                      (IsCheckedNumeric<R> && IsNumeric<L>);

template <typename L, typename R>
concept IsClampedOp =
    !IsCheckedOp<L, R> && ((IsClampedNumeric<L> && IsNumeric<R>) ||
                           (IsClampedNumeric<R> && IsNumeric<L>));

template <typename L, typename R>
concept IsStrictOp = !IsCheckedOp<L, R> && !IsClampedOp<L, R> &&
                     ((IsStrictNumeric<L> && IsNumeric<R>) ||
                      (IsStrictNumeric<R> && IsNumeric<L>));

// as_signed<> returns the supplied integral value (or integral castable
// Numeric template) cast as a signed integral of equivalent precision.
// I.e. it's mostly an alias for: static_cast<std::make_signed<T>::type>(t)
template <typename Src, typename Dst = std::make_signed_t<UnderlyingType<Src>>>
  requires std::integral<Dst>
constexpr auto as_signed(Src value) {
  return static_cast<Dst>(value);
}

// as_unsigned<> returns the supplied integral value (or integral castable
// Numeric template) cast as an unsigned integral of equivalent precision.
// I.e. it's mostly an alias for: static_cast<std::make_unsigned<T>::type>(t)
template <typename Src,
          typename Dst = std::make_unsigned_t<UnderlyingType<Src>>>
  requires std::integral<Dst>
constexpr auto as_unsigned(Src value) {
  return static_cast<Dst>(value);
}

template <typename L, typename R>
  requires std::is_arithmetic_v<L> && std::is_arithmetic_v<R>
struct IsLess {
  using SumT = decltype(std::declval<L>() + std::declval<R>());
  static constexpr bool Test(L lhs, R rhs) {
    const RangeCheck l_range = DstRangeRelationToSrcRange<R>(lhs);
    const RangeCheck r_range = DstRangeRelationToSrcRange<L>(rhs);
    return l_range.IsUnderflow() || r_range.IsOverflow() ||
           (l_range == r_range &&
            static_cast<SumT>(lhs) < static_cast<SumT>(rhs));
  }
};

template <typename L, typename R>
  requires std::is_arithmetic_v<L> && std::is_arithmetic_v<R>
struct IsLessOrEqual {
  using SumT = decltype(std::declval<L>() + std::declval<R>());
  static constexpr bool Test(L lhs, R rhs) {
    const RangeCheck l_range = DstRangeRelationToSrcRange<R>(lhs);
    const RangeCheck r_range = DstRangeRelationToSrcRange<L>(rhs);
    return l_range.IsUnderflow() || r_range.IsOverflow() ||
           (l_range == r_range &&
            static_cast<SumT>(lhs) <= static_cast<SumT>(rhs));
  }
};

template <typename L, typename R>
  requires std::is_arithmetic_v<L> && std::is_arithmetic_v<R>
struct IsGreater {
  using SumT = decltype(std::declval<L>() + std::declval<R>());
  static constexpr bool Test(L lhs, R rhs) {
    const RangeCheck l_range = DstRangeRelationToSrcRange<R>(lhs);
    const RangeCheck r_range = DstRangeRelationToSrcRange<L>(rhs);
    return l_range.IsOverflow() || r_range.IsUnderflow() ||
           (l_range == r_range &&
            static_cast<SumT>(lhs) > static_cast<SumT>(rhs));
  }
};

template <typename L, typename R>
  requires std::is_arithmetic_v<L> && std::is_arithmetic_v<R>
struct IsGreaterOrEqual {
  using SumT = decltype(std::declval<L>() + std::declval<R>());
  static constexpr bool Test(L lhs, R rhs) {
    const RangeCheck l_range = DstRangeRelationToSrcRange<R>(lhs);
    const RangeCheck r_range = DstRangeRelationToSrcRange<L>(rhs);
    return l_range.IsOverflow() || r_range.IsUnderflow() ||
           (l_range == r_range &&
            static_cast<SumT>(lhs) >= static_cast<SumT>(rhs));
  }
};

template <typename L, typename R>
  requires std::is_arithmetic_v<L> && std::is_arithmetic_v<R>
struct IsEqual {
  using SumT = decltype(std::declval<L>() + std::declval<R>());
  static constexpr bool Test(L lhs, R rhs) {
    return DstRangeRelationToSrcRange<R>(lhs) ==
               DstRangeRelationToSrcRange<L>(rhs) &&
           static_cast<SumT>(lhs) == static_cast<SumT>(rhs);
  }
};

template <typename L, typename R>
  requires std::is_arithmetic_v<L> && std::is_arithmetic_v<R>
struct IsNotEqual {
  using SumT = decltype(std::declval<L>() + std::declval<R>());
  static constexpr bool Test(L lhs, R rhs) {
    return DstRangeRelationToSrcRange<R>(lhs) !=
               DstRangeRelationToSrcRange<L>(rhs) ||
           static_cast<SumT>(lhs) != static_cast<SumT>(rhs);
  }
};

// These perform the actual math operations on the CheckedNumerics.
// Binary arithmetic operations.
template <template <typename, typename> typename C, typename L, typename R>
  requires std::is_arithmetic_v<L> && std::is_arithmetic_v<R>
constexpr bool SafeCompare(L lhs, R rhs) {
  using BigType = BigEnoughPromotion<L, R>;
  return kIsBigEnoughPromotionContained<L, R>
             // Force to a larger type for speed if both are contained.
             ? C<BigType, BigType>::Test(static_cast<BigType>(lhs),
                                         static_cast<BigType>(rhs))
             // Let the template functions figure it out for mixed types.
             : C<L, R>::Test(lhs, rhs);
}
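
// Illustrative check (added for this writeup, not from the patch): the
// built-in comparison promotes -1 to unsigned and gets the "wrong" answer
// (compilers warn here, which is rather the point), while SafeCompare
// promotes both sides to int64_t and compares mathematically.
static_assert(!(-1 < 1u));  // usual arithmetic conversions: -1 wraps to huge
static_assert(SafeCompare<IsLess>(-1, 1u));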

template <typename Dst, typename Src>
inline constexpr bool kIsMaxInRangeForNumericType =
    IsGreaterOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::max(),
                                     std::numeric_limits<Src>::max());

template <typename Dst, typename Src>
inline constexpr bool kIsMinInRangeForNumericType =
    IsLessOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::lowest(),
                                  std::numeric_limits<Src>::lowest());

template <typename Dst, typename Src>
inline constexpr Dst kCommonMax =
    kIsMaxInRangeForNumericType<Dst, Src>
        ? static_cast<Dst>(std::numeric_limits<Src>::max())
        : std::numeric_limits<Dst>::max();

template <typename Dst, typename Src>
inline constexpr Dst kCommonMin =
    kIsMinInRangeForNumericType<Dst, Src>
        ? static_cast<Dst>(std::numeric_limits<Src>::lowest())
        : std::numeric_limits<Dst>::lowest();

// This is a wrapper to return the max or min for a supplied type.
// If the argument is false, the returned value is the maximum. If true the
// returned value is the minimum.
template <typename Dst, typename Src = Dst>
constexpr Dst CommonMaxOrMin(bool is_min) {
  return is_min ? kCommonMin<Dst, Src> : kCommonMax<Dst, Src>;
}

}  // namespace v8::base::internal

#endif  // V8_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
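
For reference, the limit selection above can be spot-checked like this (a hedged sketch added for this writeup; names are fully qualified since the internal namespace is closed at this point):

#include <cstdint>
#include "src/base/numerics/safe_conversions.h"

// Src's max fits in Dst, so the Src limit is used...
static_assert(v8::base::internal::kCommonMax<int32_t, int8_t> == 127);
// ...and when it does not fit, Dst's own max is the bound.
static_assert(v8::base::internal::kCommonMax<int8_t, int32_t> == 127);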
15
deps/v8/src/base/numerics/safe_math.h
vendored
Normal file
@ -0,0 +1,15 @@
|
|||
// Copyright 2017 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Slightly adapted for inclusion in V8.
|
||||
// Copyright 2025 the V8 project authors. All rights reserved.
|
||||
|
||||
#ifndef V8_BASE_NUMERICS_SAFE_MATH_H_
|
||||
#define V8_BASE_NUMERICS_SAFE_MATH_H_
|
||||
|
||||
#include "src/base/numerics/checked_math.h" // IWYU pragma: export
|
||||
#include "src/base/numerics/clamped_math.h" // IWYU pragma: export
|
||||
#include "src/base/numerics/safe_conversions.h" // IWYU pragma: export
|
||||
|
||||
#endif // V8_BASE_NUMERICS_SAFE_MATH_H_
|
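For orientation, a minimal sketch of the checked-math API this umbrella header pulls in (assuming the Chromium-style CheckedNumeric alias that checked_math.h exports into v8::base; the helper function below is hypothetical):

    #include "src/base/numerics/safe_math.h"

    // Returns value * factor, or 0 if the multiplication would overflow int.
    int ScaleOrZero(int value, int factor) {
      v8::base::CheckedNumeric<int> result = value;
      result *= factor;                 // Overflow is tracked, not undefined.
      return result.ValueOrDefault(0);  // Falls back to 0 on overflow.
    }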
128 deps/v8/src/base/numerics/safe_math_arm_impl.h vendored Normal file
@@ -0,0 +1,128 @@
// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
#define V8_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_

// IWYU pragma: private

#include <stdint.h>

#include <cassert>

#include "src/base/numerics/safe_conversions.h"

namespace v8::base::internal {

template <typename T, typename U>
struct CheckedMulFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && kIsFastIntegerArithmeticPromotionContained<T, U>;

  // The following is not an assembler routine and is thus constexpr safe, it
  // just emits much more efficient code than the Clang and GCC builtins for
  // performing overflow-checked multiplication when a twice wider type is
  // available. The below compiles down to 2-3 instructions, depending on the
  // width of the types in use.
  // As an example, an int32_t multiply compiles to:
  //    smull   r0, r1, r0, r1
  //    cmp     r1, r1, asr #31
  // And an int16_t multiply compiles to:
  //    smulbb  r1, r1, r0
  //    asr     r2, r1, #16
  //    cmp     r2, r1, asr #15
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    using Promotion = FastIntegerArithmeticPromotion<T, U>;
    Promotion presult;

    presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
    if (!IsValueInRangeForNumericType<V>(presult)) {
      return false;
    }
    *result = static_cast<V>(presult);
    return true;
  }
};

template <typename T, typename U>
struct ClampedAddFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && kIsBigEnoughPromotionContained<T, U> &&
      kIsTypeInRangeForNumericType<int32_t, BigEnoughPromotion<T, U>>;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if constexpr (kIsIntegerArithmeticSafe<int, T, U>) {
      return saturated_cast<V>(static_cast<int>(x) + static_cast<int>(y));
    } else {
      int32_t result;
      int32_t x_i32 = checked_cast<int32_t>(x);
      int32_t y_i32 = checked_cast<int32_t>(y);

      asm("qadd %[result], %[first], %[second]"
          : [result] "=r"(result)
          : [first] "r"(x_i32), [second] "r"(y_i32));
      return saturated_cast<V>(result);
    }
  }
};

template <typename T, typename U>
struct ClampedSubFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && kIsBigEnoughPromotionContained<T, U> &&
      kIsTypeInRangeForNumericType<int32_t, BigEnoughPromotion<T, U>>;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if constexpr (kIsIntegerArithmeticSafe<int, T, U>) {
      return saturated_cast<V>(static_cast<int>(x) - static_cast<int>(y));
    } else {
      int32_t result;
      int32_t x_i32 = checked_cast<int32_t>(x);
      int32_t y_i32 = checked_cast<int32_t>(y);

      asm("qsub %[result], %[first], %[second]"
          : [result] "=r"(result)
          : [first] "r"(x_i32), [second] "r"(y_i32));
      return saturated_cast<V>(result);
    }
  }
};

template <typename T, typename U>
struct ClampedMulFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && CheckedMulFastAsmOp<T, U>::is_supported;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // Use the CheckedMulFastAsmOp for full-width 32-bit values, because
    // it's fewer instructions than promoting and then saturating.
    if constexpr (!kIsIntegerArithmeticSafe<int32_t, T, U> &&
                  !kIsIntegerArithmeticSafe<uint32_t, T, U>) {
      V result;
      return CheckedMulFastAsmOp<T, U>::Do(x, y, &result)
                 ? result
                 : CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
    } else {
      static_assert(kIsFastIntegerArithmeticPromotionContained<T, U>);
      using Promotion = FastIntegerArithmeticPromotion<T, U>;
      return saturated_cast<V>(static_cast<Promotion>(x) *
                               static_cast<Promotion>(y));
    }
  }
};

}  // namespace v8::base::internal

#endif  // V8_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
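The non-asm path of CheckedMulFastAsmOp boils down to a classic promote-then-range-check pattern; a standalone sketch with concrete types (illustrative only, not from the commit):

    #include <cstdint>
    #include <limits>

    // Multiply two int16_t values exactly in int32_t, then check the result
    // still fits the narrow type before committing it.
    bool CheckedMul16(int16_t x, int16_t y, int16_t* out) {
      int32_t wide = static_cast<int32_t>(x) * static_cast<int32_t>(y);
      if (wide < std::numeric_limits<int16_t>::min() ||
          wide > std::numeric_limits<int16_t>::max()) {
        return false;  // Product does not fit; report overflow.
      }
      *out = static_cast<int16_t>(wide);
      return true;
    }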
163 deps/v8/src/base/numerics/safe_math_clang_gcc_impl.h vendored Normal file
@@ -0,0 +1,163 @@
// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
#define V8_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_

// IWYU pragma: private

#include <stdint.h>

#include <limits>
#include <type_traits>

#include "src/base/numerics/safe_conversions.h"

#if !defined(__native_client__) && (defined(__ARMEL__) || defined(__arch64__))
#include "src/base/numerics/safe_math_arm_impl.h"  // IWYU pragma: export
#define BASE_HAS_ASSEMBLER_SAFE_MATH (1)
#else
#define BASE_HAS_ASSEMBLER_SAFE_MATH (0)
#endif

namespace v8::base {
namespace internal {

// These are the non-functioning boilerplate implementations of the optimized
// safe math routines.
#if !BASE_HAS_ASSEMBLER_SAFE_MATH
template <typename T, typename U>
struct CheckedMulFastAsmOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr bool Do(T, U, V*) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<bool>();
  }
};

template <typename T, typename U>
struct ClampedAddFastAsmOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr V Do(T, U) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<V>();
  }
};

template <typename T, typename U>
struct ClampedSubFastAsmOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr V Do(T, U) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<V>();
  }
};

template <typename T, typename U>
struct ClampedMulFastAsmOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr V Do(T, U) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<V>();
  }
};
#endif  // BASE_HAS_ASSEMBLER_SAFE_MATH
#undef BASE_HAS_ASSEMBLER_SAFE_MATH

template <typename T, typename U>
struct CheckedAddFastOp {
  static const bool is_supported = true;
  template <typename V>
  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
    return !__builtin_add_overflow(x, y, result);
  }
};

template <typename T, typename U>
struct CheckedSubFastOp {
  static const bool is_supported = true;
  template <typename V>
  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
    return !__builtin_sub_overflow(x, y, result);
  }
};

template <typename T, typename U>
struct CheckedMulFastOp {
#if defined(__clang__)
  // TODO(jschuh): Get the Clang runtime library issues sorted out so we can
  // support full-width, mixed-sign multiply builtins.
  // https://crbug.com/613003
  // We can support intptr_t, uintptr_t, or a smaller common type.
  static const bool is_supported =
      (kIsTypeInRangeForNumericType<intptr_t, T> &&
       kIsTypeInRangeForNumericType<intptr_t, U>) ||
      (kIsTypeInRangeForNumericType<uintptr_t, T> &&
       kIsTypeInRangeForNumericType<uintptr_t, U>);
#else
  static const bool is_supported = true;
#endif
  template <typename V>
  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
    return CheckedMulFastAsmOp<T, U>::is_supported
               ? CheckedMulFastAsmOp<T, U>::Do(x, y, result)
               : !__builtin_mul_overflow(x, y, result);
  }
};

template <typename T, typename U>
struct ClampedAddFastOp {
  static const bool is_supported = ClampedAddFastAsmOp<T, U>::is_supported;
  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    return ClampedAddFastAsmOp<T, U>::template Do<V>(x, y);
  }
};

template <typename T, typename U>
struct ClampedSubFastOp {
  static const bool is_supported = ClampedSubFastAsmOp<T, U>::is_supported;
  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    return ClampedSubFastAsmOp<T, U>::template Do<V>(x, y);
  }
};

template <typename T, typename U>
struct ClampedMulFastOp {
  static const bool is_supported = ClampedMulFastAsmOp<T, U>::is_supported;
  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    return ClampedMulFastAsmOp<T, U>::template Do<V>(x, y);
  }
};

template <typename T>
struct ClampedNegFastOp {
  static const bool is_supported = std::is_signed_v<T>;
  __attribute__((always_inline)) static T Do(T value) {
    // Use this when there is no assembler path available.
    if (!ClampedSubFastAsmOp<T, T>::is_supported) {
      T result;
      return !__builtin_sub_overflow(T(0), value, &result)
                 ? result
                 : std::numeric_limits<T>::max();
    }

    // Fallback to the normal subtraction path.
    return ClampedSubFastOp<T, T>::template Do<T>(T(0), value);
  }
};

}  // namespace internal
}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
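For reference, the overflow builtins these wrappers negate are the standard Clang/GCC intrinsics: they store the truncated result and return true on overflow, hence the `!` in the `Do` implementations above. A minimal sketch:

    #include <cstdint>

    // true when a + b fits in int32_t; *out always receives the wrapped result.
    bool SumFits(int32_t a, int32_t b, int32_t* out) {
      return !__builtin_add_overflow(a, b, out);
    }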
208 deps/v8/src/base/numerics/safe_math_shared_impl.h vendored Normal file
@@ -0,0 +1,208 @@
// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
#define V8_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_

// IWYU pragma: private

#include <concepts>
#include <type_traits>

#include "build/build_config.h"
#include "src/base/numerics/safe_conversions.h"

#if defined(__asmjs__) || defined(__wasm__)
// Optimized safe math instructions are incompatible with asmjs.
#define BASE_HAS_OPTIMIZED_SAFE_MATH (0)
// Where available use builtin math overflow support on Clang and GCC.
#elif !defined(__native_client__) &&                       \
    ((defined(__clang__) &&                                \
      ((__clang_major__ > 3) ||                            \
       (__clang_major__ == 3 && __clang_minor__ >= 4))) || \
     (defined(__GNUC__) && __GNUC__ >= 5))
#include "src/base/numerics/safe_math_clang_gcc_impl.h"  // IWYU pragma: export
#define BASE_HAS_OPTIMIZED_SAFE_MATH (1)
#else
#define BASE_HAS_OPTIMIZED_SAFE_MATH (0)
#endif

namespace v8::base {
namespace internal {

// These are the non-functioning boilerplate implementations of the optimized
// safe math routines.
#if !BASE_HAS_OPTIMIZED_SAFE_MATH
template <typename T, typename U>
struct CheckedAddFastOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr bool Do(T, U, V*) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<bool>();
  }
};

template <typename T, typename U>
struct CheckedSubFastOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr bool Do(T, U, V*) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<bool>();
  }
};

template <typename T, typename U>
struct CheckedMulFastOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr bool Do(T, U, V*) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<bool>();
  }
};

template <typename T, typename U>
struct ClampedAddFastOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr V Do(T, U) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<V>();
  }
};

template <typename T, typename U>
struct ClampedSubFastOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr V Do(T, U) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<V>();
  }
};

template <typename T, typename U>
struct ClampedMulFastOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr V Do(T, U) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<V>();
  }
};

template <typename T>
struct ClampedNegFastOp {
  static const bool is_supported = false;
  static constexpr T Do(T) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<T>();
  }
};
#endif  // BASE_HAS_OPTIMIZED_SAFE_MATH
#undef BASE_HAS_OPTIMIZED_SAFE_MATH

// This is used for UnsignedAbs, where we need to support floating-point
// template instantiations even though we don't actually support the operations.
// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
// so the float versions will not compile.
template <typename Numeric>
struct UnsignedOrFloatForSize;

template <typename Numeric>
  requires(std::integral<Numeric>)
struct UnsignedOrFloatForSize<Numeric> {
  using type = typename std::make_unsigned<Numeric>::type;
};

template <typename Numeric>
  requires(std::floating_point<Numeric>)
struct UnsignedOrFloatForSize<Numeric> {
  using type = Numeric;
};

// Wrap the unary operations to allow SFINAE when instantiating integrals versus
// floating points. These don't perform any overflow checking. Rather, they
// exhibit well-defined overflow semantics and rely on the caller to detect
// if an overflow occurred.

template <typename T>
  requires(std::integral<T>)
constexpr T NegateWrapper(T value) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  // This will compile to a NEG on Intel, and is normal negation on ARM.
  return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
}

template <typename T>
  requires(std::floating_point<T>)
constexpr T NegateWrapper(T value) {
  return -value;
}

template <typename T>
  requires(std::integral<T>)
constexpr typename std::make_unsigned<T>::type InvertWrapper(T value) {
  return ~value;
}

template <typename T>
  requires(std::integral<T>)
constexpr T AbsWrapper(T value) {
  return static_cast<T>(SafeUnsignedAbs(value));
}

template <typename T>
  requires(std::floating_point<T>)
constexpr T AbsWrapper(T value) {
  return value < 0 ? -value : value;
}

template <template <typename, typename> class M, typename L, typename R,
          typename Math = M<UnderlyingType<L>, UnderlyingType<R>>>
  requires requires { typename Math::result_type; }
struct MathWrapper {
  using math = Math;
  using type = typename math::result_type;
};

// The following macros are just boilerplate for the standard arithmetic
// operator overloads and variadic function templates. A macro isn't the nicest
// solution, but it beats rewriting these over and over again.
#define BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)        \
  template <typename L, typename R, typename... Args>                   \
  constexpr auto CL_ABBR##OP_NAME(L lhs, R rhs, Args... args) {         \
    return CL_ABBR##MathOp<CLASS##OP_NAME##Op, L, R, Args...>(lhs, rhs, \
                                                              args...); \
  }

#define BASE_NUMERIC_ARITHMETIC_OPERATORS(CLASS, CL_ABBR, OP_NAME, OP, CMP_OP) \
  /* Binary arithmetic operator for all CLASS##Numeric operations. */         \
  template <typename L, typename R>                                           \
    requires(Is##CLASS##Op<L, R>)                                             \
  constexpr CLASS##Numeric<typename MathWrapper<CLASS##OP_NAME##Op, L,        \
                                                R>::type> operator OP(L lhs,  \
                                                                      R rhs) {\
    return decltype(lhs OP rhs)::template MathOp<CLASS##OP_NAME##Op>(lhs,     \
                                                                     rhs);    \
  }                                                                           \
  /* Assignment arithmetic operator implementation from CLASS##Numeric. */    \
  template <typename L>                                                       \
    requires std::is_arithmetic_v<L>                                          \
  template <typename R>                                                       \
  constexpr CLASS##Numeric<L>& CLASS##Numeric<L>::operator CMP_OP(R rhs) {    \
    return MathOp<CLASS##OP_NAME##Op>(rhs);                                   \
  }                                                                           \
  /* Variadic arithmetic functions that return CLASS##Numeric. */             \
  BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)

}  // namespace internal
}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
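NegateWrapper's detour through the unsigned type is what keeps it well-defined: negating INT_MIN directly is undefined behavior, while unsigned subtraction wraps. A one-line sketch of the same trick (illustrative only):

    #include <cstdint>

    // 0u - v wraps modulo 2^32, so even v == INT32_MIN has a defined result.
    int32_t WrappingNegate(int32_t v) {
      return static_cast<int32_t>(0u - static_cast<uint32_t>(v));
    }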
45 deps/v8/src/base/numerics/wrapping_math.h vendored Normal file
@@ -0,0 +1,45 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2025 the V8 project authors. All rights reserved.

#ifndef V8_BASE_NUMERICS_WRAPPING_MATH_H_
#define V8_BASE_NUMERICS_WRAPPING_MATH_H_

#include <type_traits>

namespace v8::base {

// Returns `a + b` with overflow defined to wrap around, i.e. modulo 2^N where N
// is the bit width of `T`.
template <typename T>
inline constexpr T WrappingAdd(T a, T b) {
  static_assert(std::is_integral_v<T>);
  // Unsigned arithmetic wraps, so convert to the corresponding unsigned type.
  // Note that, if `T` is smaller than `int`, e.g. `int16_t`, the values are
  // promoted to `int`, which brings us back to undefined overflow. This is fine
  // here because the sum of any two `int16_t`s fits in `int`, but `WrappingMul`
  // will need a more complex implementation.
  using Unsigned = std::make_unsigned_t<T>;
  return static_cast<T>(static_cast<Unsigned>(a) + static_cast<Unsigned>(b));
}

// Returns `a - b` with overflow defined to wrap around, i.e. modulo 2^N where N
// is the bit width of `T`.
template <typename T>
inline constexpr T WrappingSub(T a, T b) {
  static_assert(std::is_integral_v<T>);
  // Unsigned arithmetic wraps, so convert to the corresponding unsigned type.
  // Note that, if `T` is smaller than `int`, e.g. `int16_t`, the values are
  // promoted to `int`, which brings us back to undefined overflow. This is fine
  // here because the difference of any two `int16_t`s fits in `int`, but
  // `WrappingMul` will need a more complex implementation.
  using Unsigned = std::make_unsigned_t<T>;
  return static_cast<T>(static_cast<Unsigned>(a) - static_cast<Unsigned>(b));
}

}  // namespace v8::base

#endif  // V8_BASE_NUMERICS_WRAPPING_MATH_H_
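Since both helpers are constexpr, their modular semantics can be checked at compile time; for example (illustrative, not from the patch):

    #include <cstdint>
    #include <limits>
    #include "src/base/numerics/wrapping_math.h"

    // Adding 1 to INT32_MAX wraps to INT32_MIN instead of being undefined.
    static_assert(v8::base::WrappingAdd(std::numeric_limits<int32_t>::max(),
                                        int32_t{1}) ==
                  std::numeric_limits<int32_t>::min());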
45 deps/v8/src/base/platform/time.h vendored
@@ -14,7 +14,7 @@
 #include "src/base/base-export.h"
 #include "src/base/bits.h"
 #include "src/base/macros.h"
-#include "src/base/safe_conversions.h"
+#include "src/base/numerics/safe_conversions.h"
 #if V8_OS_WIN
 #include "src/base/win32-headers.h"
 #endif
@@ -56,6 +56,9 @@ class TimeConstants {
   static constexpr int64_t kNanosecondsPerMicrosecond = 1000;
   static constexpr int64_t kNanosecondsPerSecond =
       kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
+
+  // Support defaulted comparison of subclasses.
+  constexpr auto operator<=>(const TimeConstants&) const = default;
 };
 
 // -----------------------------------------------------------------------------
@@ -189,25 +192,7 @@ class V8_BASE_EXPORT TimeDelta final {
     return delta_ / other.delta_;
   }
 
-  // Comparison operators.
-  constexpr bool operator==(const TimeDelta& other) const {
-    return delta_ == other.delta_;
-  }
-  constexpr bool operator!=(const TimeDelta& other) const {
-    return delta_ != other.delta_;
-  }
-  constexpr bool operator<(const TimeDelta& other) const {
-    return delta_ < other.delta_;
-  }
-  constexpr bool operator<=(const TimeDelta& other) const {
-    return delta_ <= other.delta_;
-  }
-  constexpr bool operator>(const TimeDelta& other) const {
-    return delta_ > other.delta_;
-  }
-  constexpr bool operator>=(const TimeDelta& other) const {
-    return delta_ >= other.delta_;
-  }
+  constexpr auto operator<=>(const TimeDelta&) const = default;
 
   friend void swap(TimeDelta a, TimeDelta b) { std::swap(a.delta_, b.delta_); }
 
@@ -322,25 +307,7 @@ class TimeBase : public TimeConstants {
     return static_cast<TimeClass&>(*this = (*this - delta));
   }
 
-  // Comparison operators
-  bool operator==(const TimeBase<TimeClass>& other) const {
-    return us_ == other.us_;
-  }
-  bool operator!=(const TimeBase<TimeClass>& other) const {
-    return us_ != other.us_;
-  }
-  bool operator<(const TimeBase<TimeClass>& other) const {
-    return us_ < other.us_;
-  }
-  bool operator<=(const TimeBase<TimeClass>& other) const {
-    return us_ <= other.us_;
-  }
-  bool operator>(const TimeBase<TimeClass>& other) const {
-    return us_ > other.us_;
-  }
-  bool operator>=(const TimeBase<TimeClass>& other) const {
-    return us_ >= other.us_;
-  }
+  constexpr auto operator<=>(const TimeBase&) const = default;
 
   // Converts an integer value representing TimeClass to a class. This is used
   // when deserializing a |TimeClass| structure, using a value known to be
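The pattern behind these hunks: one defaulted operator<=> subsumes all six hand-written relational operators, and defaulting it also gives a defaulted operator==. A minimal C++20 sketch (hypothetical type, not from the patch):

    #include <compare>
    #include <cstdint>

    struct Micros {
      int64_t us;
      // Generates <, <=, >, >= via <=>, plus a defaulted ==.
      constexpr auto operator<=>(const Micros&) const = default;
    };
    static_assert(Micros{1} < Micros{2} && Micros{2} == Micros{2});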
35 deps/v8/src/base/region-allocator.cc vendored
@@ -176,8 +176,14 @@ bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size,
   DCHECK_NE(region_state, RegionState::kFree);
 
   Address requested_end = requested_address + size;
-  DCHECK_LE(requested_end, end());
 
+  // Fail if the region would outgrow the total reservation or the addition
+  // overflows.
+  if (requested_end > end() || requested_end < requested_address) {
+    return false;
+  }
+
+  DCHECK_LE(requested_end, end());
   Region* region;
   {
     AllRegionsSet::iterator region_iter = FindRegion(requested_address);
@@ -321,6 +327,33 @@ size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
   return size;
 }
 
+bool RegionAllocator::TryGrowRegion(Address address, size_t new_size) {
+  DCHECK(IsAligned(new_size, page_size_));
+
+  AllRegionsSet::iterator region_iter = FindRegion(address);
+  if (region_iter == all_regions_.end()) {
+    return false;
+  }
+  Region* region = *region_iter;
+  if (region->begin() != address || !region->is_allocated()) {
+    return false;
+  }
+
+  // The region must not be in the free list.
+  DCHECK_EQ(free_regions_.find(*region_iter), free_regions_.end());
+  DCHECK_LT(region->size(), new_size);
+
+  if (!AllocateRegionAt(region->end(), new_size - region->size())) {
+    return false;
+  }
+
+  AllRegionsSet::iterator new_region_iter = std::next(region_iter);
+  DCHECK_NE(new_region_iter, all_regions_.end());
+
+  Merge(region_iter, new_region_iter);
+  return true;
+}
+
 size_t RegionAllocator::CheckRegion(Address address) {
   AllRegionsSet::iterator region_iter = FindRegion(address);
   if (region_iter == all_regions_.end()) {
4 deps/v8/src/base/region-allocator.h vendored
@@ -105,6 +105,10 @@ class V8_BASE_EXPORT RegionAllocator final {
   // frees the region.
   size_t TrimRegion(Address address, size_t new_size);
 
+  // Tries to grow the region at |address| to the size |new_size|. Returns true
+  // on success.
+  bool TryGrowRegion(Address address, size_t new_size);
+
   // If there is a used region starting at given address returns its size
   // otherwise 0.
   size_t CheckRegion(Address address);
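A hedged usage sketch of the new call (assumes the existing RegionAllocator(begin, size, page_size) constructor and page_size() accessor; the helper name is illustrative):

    // Try to extend an allocated region in place; this only succeeds when the
    // pages immediately after it are still free.
    bool GrowToTwoPages(v8::base::RegionAllocator& allocator,
                        v8::base::RegionAllocator::Address region_begin) {
      return allocator.TryGrowRegion(region_begin, 2 * allocator.page_size());
    }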
56 deps/v8/src/base/safe_conversions_arm_impl.h vendored
@@ -1,56 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2014 the V8 project authors. All rights reserved.
// List of adaptations:
// - include guard names
// - wrap in v8 namespace
// - include paths

#ifndef V8_BASE_SAFE_CONVERSIONS_ARM_IMPL_H_
#define V8_BASE_SAFE_CONVERSIONS_ARM_IMPL_H_

#include <stdint.h>

#include <type_traits>

#include "src/base/safe_conversions_impl.h"

namespace v8::base::internal {

// Fast saturation to a destination type.
template <typename Dst, typename Src>
struct SaturateFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && std::is_signed_v<Src> && std::is_integral_v<Dst> &&
      std::is_integral_v<Src> &&
      IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
      IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
      !IsTypeInRangeForNumericType<Dst, Src>::value;

  __attribute__((always_inline)) static Dst Do(Src value) {
    int32_t src = value;
    typename std::conditional<std::is_signed_v<Dst>, int32_t, uint32_t>::type
        result;
    if (std::is_signed_v<Dst>) {
      asm("ssat %[dst], %[shift], %[src]"
          : [dst] "=r"(result)
          : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
                                            ? IntegerBitsPlusSign<Dst>::value
                                            : 32));
    } else {
      asm("usat %[dst], %[shift], %[src]"
          : [dst] "=r"(result)
          : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value < 32
                                            ? IntegerBitsPlusSign<Dst>::value
                                            : 31));
    }
    return static_cast<Dst>(result);
  }
};

}  // namespace v8::base::internal

#endif  // V8_BASE_SAFE_CONVERSIONS_ARM_IMPL_H_
798 deps/v8/src/base/safe_conversions_impl.h vendored
@@ -1,798 +0,0 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Slightly adapted for inclusion in V8.
// Copyright 2014 the V8 project authors. All rights reserved.
// List of adaptations:
// - include guard names
// - wrap in v8 namespace
// - formatting (git cl format)

#ifndef V8_BASE_SAFE_CONVERSIONS_IMPL_H_
#define V8_BASE_SAFE_CONVERSIONS_IMPL_H_

#include <stddef.h>
#include <stdint.h>

#include <concepts>
#include <limits>
#include <type_traits>

namespace v8::base::internal {

// The std library doesn't provide a binary max_exponent for integers, however
// we can compute an analog using std::numeric_limits<>::digits.
template <typename NumericType>
struct MaxExponent {
  static const int value = std::is_floating_point_v<NumericType>
                               ? std::numeric_limits<NumericType>::max_exponent
                               : std::numeric_limits<NumericType>::digits + 1;
};

// The number of bits (including the sign) in an integer. Eliminates sizeof
// hacks.
template <typename NumericType>
struct IntegerBitsPlusSign {
  static const int value =
      std::numeric_limits<NumericType>::digits + std::is_signed_v<NumericType>;
};

// Helper templates for integer manipulations.

template <typename Integer>
struct PositionOfSignBit {
  static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
};

// Determines if a numeric value is negative without throwing compiler
// warnings on: unsigned(value) < 0.
template <typename T>
  requires(std::is_arithmetic_v<T> && std::is_signed_v<T>)
constexpr bool IsValueNegative(T value) {
  return value < 0;
}

template <typename T>
  requires(std::is_arithmetic_v<T> && std::is_unsigned_v<T>)
constexpr bool IsValueNegative(T) {
  return false;
}

// This performs a fast negation, returning a signed value. It works on unsigned
// arguments, but probably doesn't do what you want for any unsigned value
// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
template <typename T>
constexpr typename std::make_signed<T>::type ConditionalNegate(
    T x, bool is_negative) {
  static_assert(std::is_integral_v<T>, "Type must be integral");
  using SignedT = typename std::make_signed<T>::type;
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<SignedT>((static_cast<UnsignedT>(x) ^
                               static_cast<UnsignedT>(-SignedT(is_negative))) +
                              is_negative);
}

// This performs a safe, absolute value via unsigned overflow.
template <typename T>
constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
  static_assert(std::is_integral_v<T>, "Type must be integral");
  using UnsignedT = typename std::make_unsigned<T>::type;
  return IsValueNegative(value)
             ? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
             : static_cast<UnsignedT>(value);
}

// TODO(jschuh): Switch to std::is_constant_evaluated() once C++20 is supported.
// Alternately, the usage could be restructured for "consteval if" in C++23.
#define IsConstantEvaluated() (__builtin_is_constant_evaluated())

// TODO(jschuh): Debug builds don't reliably propagate constants, so we restrict
// some accelerated runtime paths to release builds until this can be forced
// with consteval support in C++20 or C++23.
#if defined(NDEBUG)
constexpr bool kEnableAsmCode = true;
#else
constexpr bool kEnableAsmCode = false;
#endif

// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
// Also used in a constexpr template to trigger a compilation failure on
// an error condition.
struct CheckOnFailure {
  template <typename T>
  static T HandleFailure() {
#if defined(_MSC_VER)
    __debugbreak();
#elif defined(__GNUC__) || defined(__clang__)
    __builtin_trap();
#else
    ((void)(*(volatile char*)0 = 0));
#endif
    return T();
  }
};

enum IntegerRepresentation {
  INTEGER_REPRESENTATION_UNSIGNED,
  INTEGER_REPRESENTATION_SIGNED
};

// A range for a given numeric Src type is contained for a given numeric Dst
// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
// We implement this as template specializations rather than simple static
// comparisons to ensure type correctness in our comparisons.
enum NumericRangeRepresentation {
  NUMERIC_RANGE_NOT_CONTAINED,
  NUMERIC_RANGE_CONTAINED
};

// Helper templates to statically determine if our destination type can contain
// maximum and minimum values represented by the source type.

template <typename Dst, typename Src,
          IntegerRepresentation DstSign = std::is_signed_v<Dst>
                                              ? INTEGER_REPRESENTATION_SIGNED
                                              : INTEGER_REPRESENTATION_UNSIGNED,
          IntegerRepresentation SrcSign = std::is_signed_v<Src>
                                              ? INTEGER_REPRESENTATION_SIGNED
                                              : INTEGER_REPRESENTATION_UNSIGNED>
struct StaticDstRangeRelationToSrcRange;

// Same sign: Dst is guaranteed to contain Src only if its range is equal or
// larger.
template <typename Dst, typename Src, IntegerRepresentation Sign>
struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign> {
  static const NumericRangeRepresentation value =
      MaxExponent<Dst>::value >= MaxExponent<Src>::value
          ? NUMERIC_RANGE_CONTAINED
          : NUMERIC_RANGE_NOT_CONTAINED;
};

// Unsigned to signed: Dst is guaranteed to contain source only if its range is
// larger.
template <typename Dst, typename Src>
struct StaticDstRangeRelationToSrcRange<Dst,
                                        Src,
                                        INTEGER_REPRESENTATION_SIGNED,
                                        INTEGER_REPRESENTATION_UNSIGNED> {
  static const NumericRangeRepresentation value =
      MaxExponent<Dst>::value > MaxExponent<Src>::value
          ? NUMERIC_RANGE_CONTAINED
          : NUMERIC_RANGE_NOT_CONTAINED;
};

// Signed to unsigned: Dst cannot be statically determined to contain Src.
template <typename Dst, typename Src>
struct StaticDstRangeRelationToSrcRange<Dst,
                                        Src,
                                        INTEGER_REPRESENTATION_UNSIGNED,
                                        INTEGER_REPRESENTATION_SIGNED> {
  static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
};

// This class wraps the range constraints as separate booleans so the compiler
// can identify constants and eliminate unused code paths.
class RangeCheck {
 public:
  constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
      : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
  constexpr RangeCheck() : is_underflow_(false), is_overflow_(false) {}
  constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
  constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
  constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
  constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
  constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
  constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
  constexpr bool operator==(const RangeCheck rhs) const {
    return is_underflow_ == rhs.is_underflow_ &&
           is_overflow_ == rhs.is_overflow_;
  }
  constexpr bool operator!=(const RangeCheck rhs) const {
    return !(*this == rhs);
  }

 private:
  // Do not change the order of these member variables. The integral conversion
  // optimization depends on this exact order.
  const bool is_underflow_;
  const bool is_overflow_;
};

// The following helper template addresses a corner case in range checks for
// conversion from a floating-point type to an integral type of smaller range
// but larger precision (e.g. float -> unsigned). The problem is as follows:
//   1. Integral maximum is always one less than a power of two, so it must be
//      truncated to fit the mantissa of the floating point. The direction of
//      rounding is implementation defined, but by default it's always IEEE
//      floats, which round to nearest and thus result in a value of larger
//      magnitude than the integral value.
//      Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
//                                   // is 4294967295u.
//   2. If the floating point value is equal to the promoted integral maximum
//      value, a range check will erroneously pass.
//      Example: (4294967296f <= 4294967295u) // This is true due to a precision
//                                            // loss in rounding up to float.
//   3. When the floating point value is then converted to an integral, the
//      resulting value is out of range for the target integral type and
//      thus is implementation defined.
//      Example: unsigned u = (float)INT_MAX; // u will typically overflow to 0.
// To fix this bug we manually truncate the maximum value when the destination
// type is an integral of larger precision than the source floating-point type,
// such that the resulting maximum is represented exactly as a floating point.
template <typename Dst, typename Src, template <typename> class Bounds>
struct NarrowingRange {
  using SrcLimits = std::numeric_limits<Src>;
  using DstLimits = typename std::numeric_limits<Dst>;

  // Computes the mask required to make an accurate comparison between types.
  static const int kShift =
      (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
       SrcLimits::digits < DstLimits::digits)
          ? (DstLimits::digits - SrcLimits::digits)
          : 0;

  template <typename T>
    requires(std::integral<T>)
  // Masks out the integer bits that are beyond the precision of the
  // intermediate type used for comparison.
  static constexpr T Adjust(T value) {
    static_assert(std::is_same_v<T, Dst>, "");
    static_assert(kShift < DstLimits::digits, "");
    using UnsignedDst = typename std::make_unsigned_t<T>;
    return static_cast<T>(ConditionalNegate(
        SafeUnsignedAbs(value) & ~((UnsignedDst{1} << kShift) - UnsignedDst{1}),
        IsValueNegative(value)));
  }

  template <typename T>
    requires(std::floating_point<T>)
  static constexpr T Adjust(T value) {
    static_assert(std::is_same_v<T, Dst>, "");
    static_assert(kShift == 0, "");
    return value;
  }

  static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
  static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
};

template <typename Dst, typename Src, template <typename> class Bounds,
          IntegerRepresentation DstSign = std::is_signed_v<Dst>
                                              ? INTEGER_REPRESENTATION_SIGNED
                                              : INTEGER_REPRESENTATION_UNSIGNED,
          IntegerRepresentation SrcSign = std::is_signed_v<Src>
                                              ? INTEGER_REPRESENTATION_SIGNED
                                              : INTEGER_REPRESENTATION_UNSIGNED,
          NumericRangeRepresentation DstRange =
              StaticDstRangeRelationToSrcRange<Dst, Src>::value>
struct DstRangeRelationToSrcRangeImpl;

// The following templates are for ranges that must be verified at runtime. We
// split it into checks based on signedness to avoid confusing casts and
// compiler warnings on signed and unsigned comparisons.

// Same sign narrowing: The range is contained for normal limits.
template <typename Dst, typename Src, template <typename> class Bounds,
          IntegerRepresentation DstSign, IntegerRepresentation SrcSign>
struct DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds, DstSign, SrcSign,
                                      NUMERIC_RANGE_CONTAINED> {
  static constexpr RangeCheck Check(Src value) {
    using SrcLimits = std::numeric_limits<Src>;
    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
    return RangeCheck(
        static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
            static_cast<Dst>(value) >= DstLimits::lowest(),
        static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
            static_cast<Dst>(value) <= DstLimits::max());
  }
};

// Signed to signed narrowing: Both the upper and lower boundaries may be
// exceeded for standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<
    Dst, Src, Bounds, INTEGER_REPRESENTATION_SIGNED,
    INTEGER_REPRESENTATION_SIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
  static constexpr RangeCheck Check(Src value) {
    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
    return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
  }
};

// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
// standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<
    Dst, Src, Bounds, INTEGER_REPRESENTATION_UNSIGNED,
    INTEGER_REPRESENTATION_UNSIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
  static constexpr RangeCheck Check(Src value) {
    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
    return RangeCheck(
        DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
        value <= DstLimits::max());
  }
};

// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<
    Dst, Src, Bounds, INTEGER_REPRESENTATION_SIGNED,
    INTEGER_REPRESENTATION_UNSIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
  static constexpr RangeCheck Check(Src value) {
    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
    using Promotion = decltype(Src() + Dst());
    return RangeCheck(DstLimits::lowest() <= Dst(0) ||
                          static_cast<Promotion>(value) >=
                              static_cast<Promotion>(DstLimits::lowest()),
                      static_cast<Promotion>(value) <=
                          static_cast<Promotion>(DstLimits::max()));
  }
};

// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
// and any negative value exceeds the lower boundary for standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<
    Dst, Src, Bounds, INTEGER_REPRESENTATION_UNSIGNED,
    INTEGER_REPRESENTATION_SIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
  static constexpr RangeCheck Check(Src value) {
    using SrcLimits = std::numeric_limits<Src>;
    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
    using Promotion = decltype(Src() + Dst());
    bool ge_zero = false;
    // Converting floating-point to integer will discard fractional part, so
    // values in (-1.0, -0.0) will truncate to 0 and fit in Dst.
    if (std::is_floating_point_v<Src>) {
      ge_zero = value > Src(-1);
    } else {
      ge_zero = value >= Src(0);
    }
    return RangeCheck(
        ge_zero && (DstLimits::lowest() == 0 ||
                    static_cast<Dst>(value) >= DstLimits::lowest()),
        static_cast<Promotion>(SrcLimits::max()) <=
                static_cast<Promotion>(DstLimits::max()) ||
            static_cast<Promotion>(value) <=
                static_cast<Promotion>(DstLimits::max()));
  }
};

// Simple wrapper for statically checking if a type's range is contained.
template <typename Dst, typename Src>
struct IsTypeInRangeForNumericType {
  static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
                            NUMERIC_RANGE_CONTAINED;
};

template <typename Dst, template <typename> class Bounds = std::numeric_limits,
          typename Src>
constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
  static_assert(std::is_arithmetic_v<Src>, "Argument must be numeric.");
  static_assert(std::is_arithmetic_v<Dst>, "Result must be numeric.");
  static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
  return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
}

// Integer promotion templates used by the portable checked integer arithmetic.
template <size_t Size, bool IsSigned>
struct IntegerForDigitsAndSign;

#define INTEGER_FOR_DIGITS_AND_SIGN(I)                          \
  template <>                                                   \
  struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
                                 std::is_signed_v<I>> {         \
    using type = I;                                             \
  }

INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
#undef INTEGER_FOR_DIGITS_AND_SIGN

// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
// support 128-bit math, then the ArithmeticPromotion template below will need
// to be updated (or more likely replaced with a decltype expression).
static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
              "Max integer size not supported for this toolchain.");

template <typename Integer, bool IsSigned = std::is_signed_v<Integer>>
struct TwiceWiderInteger {
  using type =
      typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
                                       IsSigned>::type;
};

enum ArithmeticPromotionCategory {
  LEFT_PROMOTION,  // Use the type of the left-hand argument.
  RIGHT_PROMOTION  // Use the type of the right-hand argument.
};

// Determines the type that can represent the largest positive value.
template <typename Lhs, typename Rhs,
          ArithmeticPromotionCategory Promotion =
              (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
                  ? LEFT_PROMOTION
                  : RIGHT_PROMOTION>
struct MaxExponentPromotion;

template <typename Lhs, typename Rhs>
struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
  using type = Lhs;
};

template <typename Lhs, typename Rhs>
struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
  using type = Rhs;
};

// Determines the type that can represent the lowest arithmetic value.
template <typename Lhs, typename Rhs,
          ArithmeticPromotionCategory Promotion =
              std::is_signed_v<Lhs>
                  ? (std::is_signed_v<Rhs>
                         ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
                                ? LEFT_PROMOTION
                                : RIGHT_PROMOTION)
                         : LEFT_PROMOTION)
                  : (std::is_signed_v<Rhs>
                         ? RIGHT_PROMOTION
                         : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
                                ? LEFT_PROMOTION
                                : RIGHT_PROMOTION))>
struct LowestValuePromotion;

template <typename Lhs, typename Rhs>
struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION> {
  using type = Lhs;
};

template <typename Lhs, typename Rhs>
struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION> {
  using type = Rhs;
};

// Determines the type that is best able to represent an arithmetic result.
template <
    typename Lhs, typename Rhs = Lhs,
    bool is_intmax_type =
        std::is_integral_v<typename MaxExponentPromotion<Lhs, Rhs>::type> &&
        IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
                value == IntegerBitsPlusSign<intmax_t>::value,
    bool is_max_exponent =
        StaticDstRangeRelationToSrcRange<
            typename MaxExponentPromotion<Lhs, Rhs>::type, Lhs>::value ==
            NUMERIC_RANGE_CONTAINED &&
        StaticDstRangeRelationToSrcRange<
            typename MaxExponentPromotion<Lhs, Rhs>::type, Rhs>::value ==
            NUMERIC_RANGE_CONTAINED>
struct BigEnoughPromotion;

// The side with the max exponent is big enough.
template <typename Lhs, typename Rhs, bool is_intmax_type>
struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
  static const bool is_contained = true;
};

// We can use a twice wider type to fit.
template <typename Lhs, typename Rhs>
struct BigEnoughPromotion<Lhs, Rhs, false, false> {
  using type =
      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
                                 std::is_signed_v<Lhs> ||
                                     std::is_signed_v<Rhs>>::type;
  static const bool is_contained = true;
};

// No type is large enough.
template <typename Lhs, typename Rhs>
struct BigEnoughPromotion<Lhs, Rhs, true, false> {
  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
  static const bool is_contained = false;
};

// We can statically check if operations on the provided types can wrap, so we
// can skip the checked operations if they're not needed. So, for an integer we
// care if the destination type preserves the sign and is twice the width of
// the source.
template <typename T, typename Lhs, typename Rhs = Lhs>
struct IsIntegerArithmeticSafe {
  static const bool value =
      !std::is_floating_point_v<T> && !std::is_floating_point_v<Lhs> &&
      !std::is_floating_point_v<Rhs> &&
      std::is_signed_v<T> >= std::is_signed_v<Lhs> &&
      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
      std::is_signed_v<T> >= std::is_signed_v<Rhs> &&
      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
};

// Promotes to a type that can represent any possible result of a binary
// arithmetic operation with the source types.
template <typename Lhs, typename Rhs>
struct FastIntegerArithmeticPromotion {
  using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
  static const bool is_contained = false;
};

template <typename Lhs, typename Rhs>
  requires(IsIntegerArithmeticSafe<
           std::conditional_t<std::is_signed_v<Lhs> || std::is_signed_v<Rhs>,
                              intmax_t, uintmax_t>,
           typename MaxExponentPromotion<Lhs, Rhs>::type>::value)
struct FastIntegerArithmeticPromotion<Lhs, Rhs> {
  using type =
      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
                                 std::is_signed_v<Lhs> ||
                                     std::is_signed_v<Rhs>>::type;
  static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
  static const bool is_contained = true;
};

// Extracts the underlying type from an enum.
template <typename T>
struct ArithmeticOrUnderlyingEnum {
  using type = T;
  static const bool value = std::is_arithmetic_v<type>;
};

template <typename T>
  requires(std::is_enum_v<T>)
struct ArithmeticOrUnderlyingEnum<T> {
  using type = typename std::underlying_type<T>::type;
  static const bool value = std::is_arithmetic_v<type>;
};

// The following are helper templates used in the CheckedNumeric class.
template <typename T>
class CheckedNumeric;

template <typename T>
class ClampedNumeric;

template <typename T>
class StrictNumeric;

// Used to treat CheckedNumeric and arithmetic underlying types the same.
template <typename T>
struct UnderlyingType {
  using type = typename ArithmeticOrUnderlyingEnum<T>::type;
  static const bool is_numeric = std::is_arithmetic_v<type>;
  static const bool is_checked = false;
  static const bool is_clamped = false;
  static const bool is_strict = false;
};

template <typename T>
struct UnderlyingType<CheckedNumeric<T>> {
  using type = T;
  static const bool is_numeric = true;
  static const bool is_checked = true;
  static const bool is_clamped = false;
  static const bool is_strict = false;
};

template <typename T>
struct UnderlyingType<ClampedNumeric<T>> {
  using type = T;
  static const bool is_numeric = true;
  static const bool is_checked = false;
  static const bool is_clamped = true;
  static const bool is_strict = false;
};

template <typename T>
struct UnderlyingType<StrictNumeric<T>> {
  using type = T;
  static const bool is_numeric = true;
  static const bool is_checked = false;
  static const bool is_clamped = false;
  static const bool is_strict = true;
};

template <typename L, typename R>
struct IsCheckedOp {
  static const bool value =
      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
      (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
};

template <typename L, typename R>
struct IsClampedOp {
  static const bool value =
      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
      (UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped) &&
      !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
};

template <typename L, typename R>
struct IsStrictOp {
  static const bool value =
      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
      (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict) &&
      !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked) &&
      !(UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped);
};

// as_signed<> returns the supplied integral value (or integral castable
// Numeric template) cast as a signed integral of equivalent precision.
// I.e. it's mostly an alias for: static_cast<std::make_signed<T>::type>(t)
template <typename Src>
constexpr typename std::make_signed<
    typename base::internal::UnderlyingType<Src>::type>::type
as_signed(const Src value) {
  static_assert(std::is_integral_v<decltype(as_signed(value))>,
                "Argument must be a signed or unsigned integer type.");
  return static_cast<decltype(as_signed(value))>(value);
}

// as_unsigned<> returns the supplied integral value (or integral castable
// Numeric template) cast as an unsigned integral of equivalent precision.
// I.e. it's mostly an alias for: static_cast<std::make_unsigned<T>::type>(t)
template <typename Src>
constexpr typename std::make_unsigned<
    typename base::internal::UnderlyingType<Src>::type>::type
as_unsigned(const Src value) {
  static_assert(std::is_integral_v<decltype(as_unsigned(value))>,
                "Argument must be a signed or unsigned integer type.");
  return static_cast<decltype(as_unsigned(value))>(value);
}

template <typename L, typename R>
constexpr bool IsLessImpl(const L lhs, const R rhs, const RangeCheck l_range,
                          const RangeCheck r_range) {
  return l_range.IsUnderflow() || r_range.IsOverflow() ||
         (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <
                                    static_cast<decltype(lhs + rhs)>(rhs));
}

template <typename L, typename R>
struct IsLess {
  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
                "Types must be numeric.");
  static constexpr bool Test(const L lhs, const R rhs) {
    return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
                      DstRangeRelationToSrcRange<L>(rhs));
  }
};

template <typename L, typename R>
constexpr bool IsLessOrEqualImpl(const L lhs, const R rhs,
                                 const RangeCheck l_range,
                                 const RangeCheck r_range) {
  return l_range.IsUnderflow() || r_range.IsOverflow() ||
         (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <=
                                    static_cast<decltype(lhs + rhs)>(rhs));
}

template <typename L, typename R>
struct IsLessOrEqual {
  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
                "Types must be numeric.");
  static constexpr bool Test(const L lhs, const R rhs) {
    return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
                             DstRangeRelationToSrcRange<L>(rhs));
  }
};

template <typename L, typename R>
constexpr bool IsGreaterImpl(const L lhs, const R rhs, const RangeCheck l_range,
                             const RangeCheck r_range) {
  return l_range.IsOverflow() || r_range.IsUnderflow() ||
         (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >
                                    static_cast<decltype(lhs + rhs)>(rhs));
}

template <typename L, typename R>
struct IsGreater {
  static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
                "Types must be numeric.");
  static constexpr bool Test(const L lhs, const R rhs) {
    return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
                         DstRangeRelationToSrcRange<L>(rhs));
  }
};

template <typename L, typename R>
constexpr bool IsGreaterOrEqualImpl(const L lhs, const R rhs,
                                    const RangeCheck l_range,
                                    const RangeCheck r_range) {
  return l_range.IsOverflow() || r_range.IsUnderflow() ||
|
||||
(l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >=
|
||||
static_cast<decltype(lhs + rhs)>(rhs));
|
||||
}
|
||||
|
||||
template <typename L, typename R>
|
||||
struct IsGreaterOrEqual {
|
||||
static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
|
||||
"Types must be numeric.");
|
||||
static constexpr bool Test(const L lhs, const R rhs) {
|
||||
return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
|
||||
DstRangeRelationToSrcRange<L>(rhs));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename L, typename R>
|
||||
struct IsEqual {
|
||||
static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
|
||||
"Types must be numeric.");
|
||||
static constexpr bool Test(const L lhs, const R rhs) {
|
||||
return DstRangeRelationToSrcRange<R>(lhs) ==
|
||||
DstRangeRelationToSrcRange<L>(rhs) &&
|
||||
static_cast<decltype(lhs + rhs)>(lhs) ==
|
||||
static_cast<decltype(lhs + rhs)>(rhs);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename L, typename R>
|
||||
struct IsNotEqual {
|
||||
static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
|
||||
"Types must be numeric.");
|
||||
static constexpr bool Test(const L lhs, const R rhs) {
|
||||
return DstRangeRelationToSrcRange<R>(lhs) !=
|
||||
DstRangeRelationToSrcRange<L>(rhs) ||
|
||||
static_cast<decltype(lhs + rhs)>(lhs) !=
|
||||
static_cast<decltype(lhs + rhs)>(rhs);
|
||||
}
|
||||
};
|
||||
|
||||
// These perform the actual math operations on the CheckedNumerics.
|
||||
// Binary arithmetic operations.
|
||||
template <template <typename, typename> class C, typename L, typename R>
|
||||
constexpr bool SafeCompare(const L lhs, const R rhs) {
|
||||
static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
|
||||
"Types must be numeric.");
|
||||
using Promotion = BigEnoughPromotion<L, R>;
|
||||
using BigType = typename Promotion::type;
|
||||
return Promotion::is_contained
|
||||
// Force to a larger type for speed if both are contained.
|
||||
? C<BigType, BigType>::Test(
|
||||
static_cast<BigType>(static_cast<L>(lhs)),
|
||||
static_cast<BigType>(static_cast<R>(rhs)))
|
||||
// Let the template functions figure it out for mixed types.
|
||||
: C<L, R>::Test(lhs, rhs);
|
||||
}
|
||||
|
||||
template <typename Dst, typename Src>
|
||||
constexpr bool IsMaxInRangeForNumericType() {
|
||||
return IsGreaterOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::max(),
|
||||
std::numeric_limits<Src>::max());
|
||||
}
|
||||
|
||||
template <typename Dst, typename Src>
|
||||
constexpr bool IsMinInRangeForNumericType() {
|
||||
return IsLessOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::lowest(),
|
||||
std::numeric_limits<Src>::lowest());
|
||||
}
|
||||
|
||||
template <typename Dst, typename Src>
|
||||
constexpr Dst CommonMax() {
|
||||
return !IsMaxInRangeForNumericType<Dst, Src>()
|
||||
? Dst(std::numeric_limits<Dst>::max())
|
||||
: Dst(std::numeric_limits<Src>::max());
|
||||
}
|
||||
|
||||
template <typename Dst, typename Src>
|
||||
constexpr Dst CommonMin() {
|
||||
return !IsMinInRangeForNumericType<Dst, Src>()
|
||||
? Dst(std::numeric_limits<Dst>::lowest())
|
||||
: Dst(std::numeric_limits<Src>::lowest());
|
||||
}
|
||||
|
||||
// This is a wrapper to generate return the max or min for a supplied type.
|
||||
// If the argument is false, the returned value is the maximum. If true the
|
||||
// returned value is the minimum.
|
||||
template <typename Dst, typename Src = Dst>
|
||||
constexpr Dst CommonMaxOrMin(bool is_min) {
|
||||
return is_min ? CommonMin<Dst, Src>() : CommonMax<Dst, Src>();
|
||||
}
|
||||
|
||||
} // namespace v8::base::internal
|
||||
|
||||
#endif // V8_BASE_SAFE_CONVERSIONS_IMPL_H_
|
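The comparison templates above exist because C++'s built-in comparison
operators silently convert mixed signed/unsigned operands. A minimal
standalone sketch of the pitfall they guard against, using the C++20
standard library's equivalent (std::cmp_less) rather than V8's internal
helpers:

#include <cstdint>
#include <iostream>
#include <utility>

int main() {
  int32_t lhs = -1;
  uint32_t rhs = 1;
  // Built-in <: -1 is converted to 0xFFFFFFFF, so the result is false.
  std::cout << (lhs < rhs) << "\n";              // 0
  // Range-aware comparison, the same idea IsLess/SafeCompare implement:
  // classify each value against the other type's range before comparing.
  std::cout << std::cmp_less(lhs, rhs) << "\n";  // 1
}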
8
deps/v8/src/base/small-map.h
vendored
@@ -234,10 +234,6 @@ class SmallMap {
       }
     }
 
-    V8_INLINE bool operator!=(const iterator& other) const {
-      return !(*this == other);
-    }
-
    private:
     friend class SmallMap;
     friend class const_iterator;
 
@@ -310,10 +306,6 @@ class SmallMap {
      return other.array_iter_ == nullptr && map_iter_ == other.map_iter_;
    }
 
-    V8_INLINE bool operator!=(const const_iterator& other) const {
-      return !(*this == other);
-    }
-
    private:
     friend class SmallMap;
     V8_INLINE explicit const_iterator(const value_type* init)
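The operator!= deletions in this and the next two headers are the C++20
cleanup pattern: with rewritten comparisons, declaring only operator== is
enough, because the compiler rewrites `a != b` as `!(a == b)`. A minimal
sketch (assumes -std=c++20):

#include <iostream>

struct Iter {
  int pos = 0;
  bool operator==(const Iter& other) const { return pos == other.pos; }
  // No operator!= needed: `a != b` is rewritten as `!(a == b)`.
};

int main() {
  Iter a{1}, b{2};
  std::cout << (a != b) << "\n";  // 1
}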
6
deps/v8/src/base/threaded-list.h
vendored
@@ -196,9 +196,6 @@ class ThreadedListBase final : public BaseClass {
     bool operator==(const Iterator& other) const {
       return entry_ == other.entry_;
     }
-    bool operator!=(const Iterator& other) const {
-      return entry_ != other.entry_;
-    }
     T*& operator*() { return *entry_; }
     T* operator->() { return *entry_; }
     Iterator& operator=(T* entry) {
 
@@ -247,9 +244,6 @@ class ThreadedListBase final : public BaseClass {
     bool operator==(const ConstIterator& other) const {
       return entry_ == other.entry_;
     }
-    bool operator!=(const ConstIterator& other) const {
-      return entry_ != other.entry_;
-    }
     const T* operator*() const { return *entry_; }
 
    private:
23
deps/v8/src/base/vector.h
vendored
@@ -130,7 +130,7 @@ class Vector {
     length_ = 0;
   }
 
-  Vector<T> operator+(size_t offset) {
+  const Vector<T> operator+(size_t offset) const {
     DCHECK_LE(offset, length_);
     return Vector<T>(start_ + offset, length_ - offset);
   }
 
@@ -169,22 +169,12 @@
     return std::equal(begin(), end(), other.begin(), other.end());
   }
 
-  bool operator!=(const Vector<T>& other) const {
-    return !operator==(other);
-  }
-
   template <typename TT = T>
     requires(!std::is_const_v<TT>)
   bool operator==(const Vector<const T>& other) const {
     return std::equal(begin(), end(), other.begin(), other.end());
   }
 
-  template <typename TT = T>
-    requires(!std::is_const_v<TT>)
-  bool operator!=(const Vector<const T>& other) const {
-    return !operator==(other);
-  }
-
  private:
   T* start_;
   size_t length_;
 
@@ -289,6 +279,16 @@ class OwnedVector {
     return OwnedVector<T>(std::make_unique<T[]>(size), size);
   }
 
+  // Allocates a new vector of the specified size via the default allocator and
+  // initializes all elements by assigning from `init`.
+  template <typename U>
+  static OwnedVector<T> New(size_t size, U init) {
+    if (size == 0) return {};
+    OwnedVector<T> vec = NewForOverwrite(size);
+    std::fill_n(vec.begin(), size, init);
+    return vec;
+  }
+
   // Allocates a new vector of the specified size via the default allocator.
   // Elements in the new vector are default-initialized.
   static OwnedVector<T> NewForOverwrite(size_t size) {
 
@@ -307,7 +307,6 @@
   }
 
   bool operator==(std::nullptr_t) const { return data_ == nullptr; }
-  bool operator!=(std::nullptr_t) const { return data_ != nullptr; }
 
  private:
  template <typename U>
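The new OwnedVector<T>::New(size, init) pairs an uninitialized allocation
with a fill, as its comment describes. A hypothetical standalone sketch of
the same pattern (NewFilled is an illustrative stand-in, not V8's API;
assumes C++20 for std::make_unique_for_overwrite):

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <memory>

template <typename T, typename U>
std::unique_ptr<T[]> NewFilled(size_t size, U init) {
  if (size == 0) return {};
  // Uninitialized storage, analogous to NewForOverwrite().
  auto vec = std::make_unique_for_overwrite<T[]>(size);
  std::fill_n(vec.get(), size, init);
  return vec;
}

int main() {
  auto data = NewFilled<int>(4, 7);
  for (size_t i = 0; i < 4; ++i) std::cout << data[i] << ' ';  // 7 7 7 7
}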
@@ -441,9 +441,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   if (skip_interrupt_label) __ b(ge, skip_interrupt_label);
 }
 
-void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
-                                       uint32_t depth,
-                                       CompressionMode compression_mode) {
+void BaselineAssembler::LdaContextSlotNoCell(Register context, uint32_t index,
+                                             uint32_t depth,
+                                             CompressionMode compression_mode) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -451,8 +451,8 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                   Context::OffsetOfElementAt(index));
 }
 
-void BaselineAssembler::StaContextSlot(Register context, Register value,
-                                       uint32_t index, uint32_t depth) {
+void BaselineAssembler::StaContextSlotNoCell(Register context, Register value,
+                                             uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -494,9 +494,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   if (skip_interrupt_label) __ B(ge, skip_interrupt_label);
 }
 
-void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
-                                       uint32_t depth,
-                                       CompressionMode compression_mode) {
+void BaselineAssembler::LdaContextSlotNoCell(Register context, uint32_t index,
+                                             uint32_t depth,
+                                             CompressionMode compression_mode) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -504,8 +504,8 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                   Context::OffsetOfElementAt(index));
 }
 
-void BaselineAssembler::StaContextSlot(Register context, Register value,
-                                       uint32_t index, uint32_t depth) {
+void BaselineAssembler::StaContextSlotNoCell(Register context, Register value,
+                                             uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
6
deps/v8/src/baseline/baseline-assembler.h
vendored
@@ -208,11 +208,11 @@ class BaselineAssembler {
     kDefault,
     kForceDecompression,
   };
-  inline void LdaContextSlot(
+  inline void LdaContextSlotNoCell(
       Register context, uint32_t index, uint32_t depth,
       CompressionMode compression_mode = CompressionMode::kDefault);
-  inline void StaContextSlot(Register context, Register value, uint32_t index,
-                             uint32_t depth);
+  inline void StaContextSlotNoCell(Register context, Register value,
+                                   uint32_t index, uint32_t depth);
   inline void LdaModuleVariable(Register context, int cell_index,
                                 uint32_t depth);
   inline void StaModuleVariable(Register context, Register value,
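LdaContextSlotNoCell and StaContextSlotNoCell keep the same shape they had
before the rename: hop `depth` times through the previous-context links, then
access slot `index`. A conceptual sketch of that loop (illustrative C++, not
V8 code):

#include <cassert>
#include <vector>

struct Context {
  Context* previous;       // analogue of Context::kPreviousOffset
  std::vector<int> slots;  // analogue of Context::OffsetOfElementAt(index)
};

int LoadContextSlot(Context* context, unsigned index, unsigned depth) {
  for (; depth > 0; --depth) context = context->previous;
  return context->slots[index];
}

int main() {
  Context outer{nullptr, {42}};
  Context inner{&outer, {7}};
  assert(LoadContextSlot(&inner, 0, 1) == 42);  // one hop reaches `outer`
}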
75
deps/v8/src/baseline/baseline-compiler.cc
vendored
@@ -626,8 +626,8 @@ constexpr static bool BuiltinMayDeopt(Builtin id) {
     case Builtin::kBaselineOutOfLinePrologue:
     case Builtin::kIncBlockCounter:
     case Builtin::kToObject:
-    case Builtin::kStoreScriptContextSlotBaseline:
-    case Builtin::kStoreCurrentScriptContextSlotBaseline:
+    case Builtin::kStoreContextElementBaseline:
+    case Builtin::kStoreCurrentContextElementBaseline:
     // This one explicitly skips the construct if the debugger is enabled.
     case Builtin::kFindNonDefaultConstructorOrConstruct:
       return false;
 
@@ -757,37 +757,41 @@ void BaselineCompiler::VisitPopContext() {
   __ StoreContext(context);
 }
 
-void BaselineCompiler::VisitLdaContextSlot() {
+void BaselineCompiler::VisitLdaContextSlotNoCell() {
   BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
   Register context = scratch_scope.AcquireScratch();
   LoadRegister(context, 0);
   uint32_t index = Index(1);
   uint32_t depth = Uint(2);
-  __ LdaContextSlot(context, index, depth);
+  __ LdaContextSlotNoCell(context, index, depth);
 }
 
-void BaselineCompiler::VisitLdaScriptContextSlot() {
+void BaselineCompiler::VisitLdaContextSlot() {
   BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
   Register context = scratch_scope.AcquireScratch();
   Label done;
   LoadRegister(context, 0);
   uint32_t index = Index(1);
   uint32_t depth = Uint(2);
-  __ LdaContextSlot(context, index, depth,
-                    BaselineAssembler::CompressionMode::kForceDecompression);
+  __ LdaContextSlotNoCell(
+      context, index, depth,
+      BaselineAssembler::CompressionMode::kForceDecompression);
   __ JumpIfSmi(kInterpreterAccumulatorRegister, &done);
   __ JumpIfObjectTypeFast(kNotEqual, kInterpreterAccumulatorRegister,
-                          HEAP_NUMBER_TYPE, &done, Label::kNear);
-  CallBuiltin<Builtin::kAllocateIfMutableHeapNumberScriptContextSlot>(
+                          CONTEXT_CELL_TYPE, &done, Label::kNear);
+  // TODO(victorgomes): inline trivial constant value read from context cell.
+  CallBuiltin<Builtin::kLoadFromContextCell>(
       kInterpreterAccumulatorRegister,  // heap number
       context,                          // context
       Smi::FromInt(index));             // slot
   __ Bind(&done);
 }
 
-void BaselineCompiler::VisitLdaImmutableContextSlot() { VisitLdaContextSlot(); }
+void BaselineCompiler::VisitLdaImmutableContextSlot() {
+  VisitLdaContextSlotNoCell();
+}
 
-void BaselineCompiler::VisitLdaCurrentContextSlot() {
+void BaselineCompiler::VisitLdaCurrentContextSlotNoCell() {
   BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
   Register context = scratch_scope.AcquireScratch();
   __ LoadContext(context);
 
@@ -795,7 +799,7 @@ void BaselineCompiler::VisitLdaCurrentContextSlot() {
                   Context::OffsetOfElementAt(Index(0)));
 }
 
-void BaselineCompiler::VisitLdaCurrentScriptContextSlot() {
+void BaselineCompiler::VisitLdaCurrentContextSlot() {
   BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
   Register context = scratch_scope.AcquireScratch();
   Label done;
 
@@ -805,8 +809,9 @@ void BaselineCompiler::VisitLdaCurrentScriptContextSlot() {
                   Context::OffsetOfElementAt(index));
   __ JumpIfSmi(kInterpreterAccumulatorRegister, &done);
   __ JumpIfObjectTypeFast(kNotEqual, kInterpreterAccumulatorRegister,
-                          HEAP_NUMBER_TYPE, &done, Label::kNear);
-  CallBuiltin<Builtin::kAllocateIfMutableHeapNumberScriptContextSlot>(
+                          CONTEXT_CELL_TYPE, &done, Label::kNear);
+  // TODO(victorgomes): inline trivial constant value read from context cell.
+  CallBuiltin<Builtin::kLoadFromContextCell>(
       kInterpreterAccumulatorRegister,  // heap number
       context,                          // context
       Smi::FromInt(index));             // slot
 
@@ -814,10 +819,10 @@
 }
 
 void BaselineCompiler::VisitLdaImmutableCurrentContextSlot() {
-  VisitLdaCurrentContextSlot();
+  VisitLdaCurrentContextSlotNoCell();
 }
 
-void BaselineCompiler::VisitStaContextSlot() {
+void BaselineCompiler::VisitStaContextSlotNoCell() {
   Register value = WriteBarrierDescriptor::ValueRegister();
   Register context = WriteBarrierDescriptor::ObjectRegister();
   DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister));
 
@@ -825,10 +830,10 @@
   LoadRegister(context, 0);
   uint32_t index = Index(1);
   uint32_t depth = Uint(2);
-  __ StaContextSlot(context, value, index, depth);
+  __ StaContextSlotNoCell(context, value, index, depth);
 }
 
-void BaselineCompiler::VisitStaCurrentContextSlot() {
+void BaselineCompiler::VisitStaCurrentContextSlotNoCell() {
   Register value = WriteBarrierDescriptor::ValueRegister();
   Register context = WriteBarrierDescriptor::ObjectRegister();
   DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister));
 
@@ -838,26 +843,25 @@
       context, Context::OffsetOfElementAt(Index(0)), value);
 }
 
-void BaselineCompiler::VisitStaScriptContextSlot() {
+void BaselineCompiler::VisitStaContextSlot() {
   Register value = WriteBarrierDescriptor::ValueRegister();
   Register context = WriteBarrierDescriptor::ObjectRegister();
   DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister));
   __ Move(value, kInterpreterAccumulatorRegister);
   LoadRegister(context, 0);
   SaveAccumulatorScope accumulator_scope(this, &basm_);
-  CallBuiltin<Builtin::kStoreScriptContextSlotBaseline>(
-      context,           // context
-      value,             // value
-      IndexAsSmi(1),     // slot
-      UintAsTagged(2));  // depth
+  CallBuiltin<Builtin::kStoreContextElementBaseline>(context,  // context
+                                                     value,    // value
+                                                     IndexAsSmi(1),  // slot
+                                                     UintAsTagged(2));  // depth
 }
 
-void BaselineCompiler::VisitStaCurrentScriptContextSlot() {
+void BaselineCompiler::VisitStaCurrentContextSlot() {
   Register value = WriteBarrierDescriptor::ValueRegister();
   DCHECK(!AreAliased(value, kInterpreterAccumulatorRegister));
   SaveAccumulatorScope accumulator_scope(this, &basm_);
   __ Move(value, kInterpreterAccumulatorRegister);
-  CallBuiltin<Builtin::kStoreCurrentScriptContextSlotBaseline>(
+  CallBuiltin<Builtin::kStoreCurrentContextElementBaseline>(
       value,           // value
       IndexAsSmi(0));  // slot
 }
 
@@ -866,12 +870,12 @@ void BaselineCompiler::VisitLdaLookupSlot() {
   CallRuntime(Runtime::kLoadLookupSlot, Constant<Name>(0));
 }
 
-void BaselineCompiler::VisitLdaLookupContextSlot() {
-  CallBuiltin<Builtin::kLookupContextBaseline>(
+void BaselineCompiler::VisitLdaLookupContextSlotNoCell() {
+  CallBuiltin<Builtin::kLookupContextNoCellBaseline>(
       Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
 }
 
-void BaselineCompiler::VisitLdaLookupScriptContextSlot() {
+void BaselineCompiler::VisitLdaLookupContextSlot() {
   CallBuiltin<Builtin::kLookupScriptContextBaseline>(
       Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
 }
 
@@ -885,13 +889,13 @@ void BaselineCompiler::VisitLdaLookupSlotInsideTypeof() {
   CallRuntime(Runtime::kLoadLookupSlotInsideTypeof, Constant<Name>(0));
 }
 
-void BaselineCompiler::VisitLdaLookupContextSlotInsideTypeof() {
-  CallBuiltin<Builtin::kLookupContextInsideTypeofBaseline>(
+void BaselineCompiler::VisitLdaLookupContextSlotNoCellInsideTypeof() {
+  CallBuiltin<Builtin::kLookupContextNoCellInsideTypeofBaseline>(
      Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
 }
 
-void BaselineCompiler::VisitLdaLookupScriptContextSlotInsideTypeof() {
-  CallBuiltin<Builtin::kLookupScriptContextInsideTypeofBaseline>(
+void BaselineCompiler::VisitLdaLookupContextSlotInsideTypeof() {
+  CallBuiltin<Builtin::kLookupContextInsideTypeofBaseline>(
      Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
 }
 
@@ -1072,6 +1076,11 @@ void BaselineCompiler::VisitAdd() {
       RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
 }
 
+void BaselineCompiler::VisitAdd_LhsIsStringConstant_Internalize() {
+  CallBuiltin<Builtin::kAdd_LhsIsStringConstant_Internalize_Baseline>(
+      RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
+}
+
 void BaselineCompiler::VisitSub() {
   CallBuiltin<Builtin::kSubtract_Baseline>(
       RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
@@ -413,9 +413,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
 }
 
-void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
-                                       uint32_t depth,
-                                       CompressionMode compression_mode) {
+void BaselineAssembler::LdaContextSlotNoCell(Register context, uint32_t index,
+                                             uint32_t depth,
+                                             CompressionMode compression_mode) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -423,8 +423,8 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                   Context::OffsetOfElementAt(index));
 }
 
-void BaselineAssembler::StaContextSlot(Register context, Register value,
-                                       uint32_t index, uint32_t depth) {
+void BaselineAssembler::StaContextSlotNoCell(Register context, Register value,
+                                             uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -428,9 +428,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
 }
 
-void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
-                                       uint32_t depth,
-                                       CompressionMode compression_mode) {
+void BaselineAssembler::LdaContextSlotNoCell(Register context, uint32_t index,
+                                             uint32_t depth,
+                                             CompressionMode compression_mode) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -438,8 +438,8 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                   Context::OffsetOfElementAt(index));
 }
 
-void BaselineAssembler::StaContextSlot(Register context, Register value,
-                                       uint32_t index, uint32_t depth) {
+void BaselineAssembler::StaContextSlotNoCell(Register context, Register value,
+                                             uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -425,9 +425,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
 }
 
-void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
-                                       uint32_t depth,
-                                       CompressionMode compression_mode) {
+void BaselineAssembler::LdaContextSlotNoCell(Register context, uint32_t index,
+                                             uint32_t depth,
+                                             CompressionMode compression_mode) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -435,8 +435,8 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                   Context::OffsetOfElementAt(index));
 }
 
-void BaselineAssembler::StaContextSlot(Register context, Register value,
-                                       uint32_t index, uint32_t depth) {
+void BaselineAssembler::StaContextSlotNoCell(Register context, Register value,
+                                             uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -534,9 +534,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   if (skip_interrupt_label) __ bge(skip_interrupt_label, cr0);
 }
 
-void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
-                                       uint32_t depth,
-                                       CompressionMode compression_mode) {
+void BaselineAssembler::LdaContextSlotNoCell(Register context, uint32_t index,
+                                             uint32_t depth,
+                                             CompressionMode compression_mode) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
 
@@ -545,8 +545,8 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                   Context::OffsetOfElementAt(index));
 }
 
-void BaselineAssembler::StaContextSlot(Register context, Register value,
-                                       uint32_t index, uint32_t depth) {
+void BaselineAssembler::StaContextSlotNoCell(Register context, Register value,
+                                             uint32_t index, uint32_t depth) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
 
@@ -437,9 +437,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   }
 }
 
-void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
-                                       uint32_t depth,
-                                       CompressionMode compression_mode) {
+void BaselineAssembler::LdaContextSlotNoCell(Register context, uint32_t index,
+                                             uint32_t depth,
+                                             CompressionMode compression_mode) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -447,8 +447,8 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                   Context::OffsetOfElementAt(index));
 }
 
-void BaselineAssembler::StaContextSlot(Register context, Register value,
-                                       uint32_t index, uint32_t depth) {
+void BaselineAssembler::StaContextSlotNoCell(Register context, Register value,
+                                             uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -534,9 +534,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   if (skip_interrupt_label) __ b(ge, skip_interrupt_label);
 }
 
-void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
-                                       uint32_t depth,
-                                       CompressionMode compression_mode) {
+void BaselineAssembler::LdaContextSlotNoCell(Register context, uint32_t index,
+                                             uint32_t depth,
+                                             CompressionMode compression_mode) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -544,8 +544,8 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                   Context::OffsetOfElementAt(index));
 }
 
-void BaselineAssembler::StaContextSlot(Register context, Register value,
-                                       uint32_t index, uint32_t depth) {
+void BaselineAssembler::StaContextSlotNoCell(Register context, Register value,
+                                             uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
     LoadTaggedField(context, context, Context::kPreviousOffset);
   }
 
@@ -423,9 +423,9 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
 }
 
-void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
-                                       uint32_t depth,
-                                       CompressionMode compression_mode) {
+void BaselineAssembler::LdaContextSlotNoCell(Register context, uint32_t index,
+                                             uint32_t depth,
+                                             CompressionMode compression_mode) {
   // [context] is coming from interpreter frame so it is already decompressed
   // when pointer compression is enabled. In order to make use of complex
   // addressing mode, any intermediate context pointer is loaded in compressed
 
@@ -449,8 +449,8 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
   }
 }
 
-void BaselineAssembler::StaContextSlot(Register context, Register value,
-                                       uint32_t index, uint32_t depth) {
+void BaselineAssembler::StaContextSlotNoCell(Register context, Register value,
+                                             uint32_t index, uint32_t depth) {
   // [context] is coming from interpreter frame so it is already decompressed
   // when pointer compression is enabled. In order to make use of complex
   // addressing mode, any intermediate context pointer is loaded in compressed
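The x64 comments above concern pointer compression: a compressed tagged
pointer is a 32-bit offset from a heap base, which is exactly the shape a
base+offset addressing mode wants. A conceptual sketch of the idea (not V8's
actual scheme or constants):

#include <cassert>
#include <cstdint>

struct Cage {
  uintptr_t base;  // heap cage base shared by all compressed pointers
  uint32_t Compress(uintptr_t full) const {
    return static_cast<uint32_t>(full - base);
  }
  uintptr_t Decompress(uint32_t compressed) const {
    return base + compressed;
  }
};

int main() {
  Cage cage{0x100000000};
  uintptr_t p = cage.base + 0x1234;
  assert(cage.Decompress(cage.Compress(p)) == p);  // round-trips exactly
}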
6
deps/v8/src/builtins/accessors.cc
vendored
@@ -206,7 +206,7 @@ void Accessors::ArrayLengthSetter(
     return;
   }
 
-  if (JSArray::SetLength(array, length).IsNothing()) {
+  if (JSArray::SetLength(isolate, array, length).IsNothing()) {
     // TODO(victorgomes): AccessorNameBooleanSetterCallback does not handle
     // exceptions.
     FATAL("Fatal JavaScript invalid array length %u", length);
 
@@ -326,7 +326,7 @@ static DirectHandle<Object> GetFunctionPrototype(
     DisableTemporaryObjectTracking no_temp_tracking(isolate->debug());
     DirectHandle<JSObject> proto =
         isolate->factory()->NewFunctionPrototype(function);
-    JSFunction::SetPrototype(function, proto);
+    JSFunction::SetPrototype(isolate, function, proto);
   }
   return DirectHandle<Object>(function->prototype(), isolate);
 }
 
@@ -353,7 +353,7 @@ void Accessors::FunctionPrototypeSetter(
   DirectHandle<JSFunction> object =
       Cast<JSFunction>(Utils::OpenDirectHandle(*info.Holder()));
   DCHECK(object->has_prototype_property());
-  JSFunction::SetPrototype(object, value);
+  JSFunction::SetPrototype(isolate, object, value);
   info.GetReturnValue().Set(true);
 }
239
deps/v8/src/builtins/arm/builtins-arm.cc
vendored
@@ -2947,77 +2947,59 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
 namespace {
 // Check that the stack was in the old state (if generated code assertions are
 // enabled), and switch to the new state.
-void SwitchStackState(MacroAssembler* masm, Register jmpbuf, Register tmp,
+void SwitchStackState(MacroAssembler* masm, Register stack, Register tmp,
                       wasm::JumpBuffer::StackState old_state,
                       wasm::JumpBuffer::StackState new_state) {
-  __ ldr(tmp, MemOperand(jmpbuf, wasm::kJmpBufStateOffset));
+  __ ldr(tmp, MemOperand(stack, wasm::kStackStateOffset));
   Label ok;
   __ JumpIfEqual(tmp, old_state, &ok);
   __ Trap();
   __ bind(&ok);
   __ mov(tmp, Operand(new_state));
-  __ str(tmp, MemOperand(jmpbuf, wasm::kJmpBufStateOffset));
+  __ str(tmp, MemOperand(stack, wasm::kStackStateOffset));
 }
 
 // Switch the stack pointer.
-void SwitchStackPointer(MacroAssembler* masm, Register jmpbuf) {
-  __ ldr(sp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
+void SwitchStackPointer(MacroAssembler* masm, Register stack) {
+  __ ldr(sp, MemOperand(stack, wasm::kStackSpOffset));
 }
 
-void FillJumpBuffer(MacroAssembler* masm, Register jmpbuf, Label* target,
+void FillJumpBuffer(MacroAssembler* masm, Register stack, Label* target,
                     Register tmp) {
   __ mov(tmp, sp);
-  __ str(tmp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
-  __ str(fp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
+  __ str(tmp, MemOperand(stack, wasm::kStackSpOffset));
+  __ str(fp, MemOperand(stack, wasm::kStackFpOffset));
   __ LoadStackLimit(tmp, StackLimitKind::kRealStackLimit);
-  __ str(tmp, MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset));
+  __ str(tmp, MemOperand(stack, wasm::kStackLimitOffset));
 
   __ GetLabelAddress(tmp, target);
   // Stash the address in the jump buffer.
-  __ str(tmp, MemOperand(jmpbuf, wasm::kJmpBufPcOffset));
+  __ str(tmp, MemOperand(stack, wasm::kStackPcOffset));
 }
 
-void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf, bool load_pc,
+void LoadJumpBuffer(MacroAssembler* masm, Register stack, bool load_pc,
                     Register tmp, wasm::JumpBuffer::StackState expected_state) {
-  SwitchStackPointer(masm, jmpbuf);
-  __ ldr(fp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
-  SwitchStackState(masm, jmpbuf, tmp, expected_state, wasm::JumpBuffer::Active);
+  SwitchStackPointer(masm, stack);
+  __ ldr(fp, MemOperand(stack, wasm::kStackFpOffset));
+  SwitchStackState(masm, stack, tmp, expected_state, wasm::JumpBuffer::Active);
   if (load_pc) {
-    __ ldr(tmp, MemOperand(jmpbuf, wasm::kJmpBufPcOffset));
+    __ ldr(tmp, MemOperand(stack, wasm::kStackPcOffset));
     __ bx(tmp);
   }
   // The stack limit in StackGuard is set separately under the ExecutionAccess
   // lock.
 }
 
-void SaveState(MacroAssembler* masm, Register active_continuation, Register tmp,
-               Label* suspend) {
-  Register jmpbuf = tmp;
-  __ ldr(jmpbuf, FieldMemOperand(active_continuation,
-                                 WasmContinuationObject::kStackOffset));
-  __ add(jmpbuf, jmpbuf, Operand(wasm::StackMemory::jmpbuf_offset()));
-
-  UseScratchRegisterScope temps(masm);
-  FillJumpBuffer(masm, jmpbuf, suspend, temps.Acquire());
-}
-
-void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_continuation,
+void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_stack,
                           Register tmp,
                           wasm::JumpBuffer::StackState expected_state) {
-  Register target_jmpbuf = target_continuation;
-  __ ldr(target_jmpbuf, FieldMemOperand(target_continuation,
-                                        WasmContinuationObject::kStackOffset));
-  __ add(target_jmpbuf, target_jmpbuf,
-         Operand(wasm::StackMemory::jmpbuf_offset()));
-
   __ Zero(MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
   // Switch stack!
-  LoadJumpBuffer(masm, target_jmpbuf, false, tmp, expected_state);
+  LoadJumpBuffer(masm, target_stack, false, tmp, expected_state);
 }
 
 // Updates the stack limit and central stack info, and validates the switch.
-void SwitchStacks(MacroAssembler* masm, Register old_continuation,
-                  bool return_switch,
+void SwitchStacks(MacroAssembler* masm, Register old_stack, bool return_switch,
                   const std::initializer_list<Register> keep) {
   using ER = ExternalReference;
 
@@ -3028,8 +3010,9 @@ void SwitchStacks(MacroAssembler* masm, Register old_continuation,
   {
     __ PrepareCallCFunction(2);
     FrameScope scope(masm, StackFrame::MANUAL);
+    // Move {old_stack} first in case it aliases kCArgRegs[0].
+    __ Move(kCArgRegs[1], old_stack);
     __ Move(kCArgRegs[0], ExternalReference::isolate_address(masm->isolate()));
-    __ Move(kCArgRegs[1], old_continuation);
    __ CallCFunction(
        return_switch ? ER::wasm_return_switch() : ER::wasm_switch_stacks(), 2);
   }
 
@@ -3039,47 +3022,34 @@
   }
 }
 
-void ReloadParentContinuation(MacroAssembler* masm, Register return_reg,
-                              Register return_value, Register context,
-                              Register tmp1, Register tmp2, Register tmp3) {
-  Register active_continuation = tmp1;
-  __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
+void ReloadParentStack(MacroAssembler* masm, Register return_reg,
+                       Register return_value, Register context, Register tmp1,
+                       Register tmp2, Register tmp3) {
+  Register active_stack = tmp1;
+  __ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
 
   // Set a null pointer in the jump buffer's SP slot to indicate to the stack
   // frame iterator that this stack is empty.
-  Register jmpbuf = tmp2;
-  __ ldr(jmpbuf, FieldMemOperand(active_continuation,
-                                 WasmContinuationObject::kStackOffset));
-  __ add(jmpbuf, jmpbuf, Operand(wasm::StackMemory::jmpbuf_offset()));
-  __ Zero(MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
+  __ Zero(MemOperand(active_stack, wasm::kStackSpOffset));
   {
     UseScratchRegisterScope temps(masm);
     Register scratch = temps.Acquire();
-    SwitchStackState(masm, jmpbuf, scratch, wasm::JumpBuffer::Active,
+    SwitchStackState(masm, active_stack, scratch, wasm::JumpBuffer::Active,
                      wasm::JumpBuffer::Retired);
   }
   Register parent = tmp2;
-  __ LoadTaggedField(parent,
-                     FieldMemOperand(active_continuation,
-                                     WasmContinuationObject::kParentOffset));
+  __ ldr(parent, MemOperand(active_stack, wasm::kStackParentOffset));
 
-  // Update active continuation root.
-  int32_t active_continuation_offset =
-      MacroAssembler::RootRegisterOffsetForRootIndex(
-          RootIndex::kActiveContinuation);
-  __ str(parent, MemOperand(kRootRegister, active_continuation_offset));
-  jmpbuf = parent;
-  __ ldr(jmpbuf, FieldMemOperand(parent, WasmContinuationObject::kStackOffset));
-  __ add(jmpbuf, jmpbuf, Operand(wasm::StackMemory::jmpbuf_offset()));
+  // Update active stack.
+  __ StoreRootRelative(IsolateData::active_stack_offset(), parent);
 
   // Switch stack!
-  SwitchStacks(masm, active_continuation, true,
-               {return_reg, return_value, context, jmpbuf});
-  LoadJumpBuffer(masm, jmpbuf, false, tmp3, wasm::JumpBuffer::Inactive);
+  SwitchStacks(masm, active_stack, true,
+               {return_reg, return_value, context, parent});
+  LoadJumpBuffer(masm, parent, false, tmp3, wasm::JumpBuffer::Inactive);
 }
 
-void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
-                            Register tmp2) {
+void RestoreParentSuspender(MacroAssembler* masm, Register tmp1) {
   Register suspender = tmp1;
   __ LoadRoot(suspender, RootIndex::kActiveSuspender);
   __ LoadTaggedField(
 
@@ -3290,30 +3260,25 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
   // Save current state in active jump buffer.
   // -------------------------------------------
   Label resume;
-  DEFINE_REG(continuation);
-  __ LoadRoot(continuation, RootIndex::kActiveContinuation);
-  DEFINE_REG(jmpbuf);
+  DEFINE_REG(stack);
+  __ LoadRootRelative(stack, IsolateData::active_stack_offset());
   DEFINE_REG(scratch);
-  __ ldr(jmpbuf,
-         FieldMemOperand(continuation, WasmContinuationObject::kStackOffset));
-  __ add(jmpbuf, jmpbuf, Operand(wasm::StackMemory::jmpbuf_offset()));
-  FillJumpBuffer(masm, jmpbuf, &resume, scratch);
-  SwitchStackState(masm, jmpbuf, scratch, wasm::JumpBuffer::Active,
+  FillJumpBuffer(masm, stack, &resume, scratch);
+  SwitchStackState(masm, stack, scratch, wasm::JumpBuffer::Active,
                    wasm::JumpBuffer::Suspended);
-  regs.ResetExcept(suspender, continuation);
+  regs.ResetExcept(suspender, stack);
 
-  DEFINE_REG(suspender_continuation);
-  __ LoadTaggedField(
-      suspender_continuation,
-      FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
+  DEFINE_REG(suspender_stack);
+  __ ldr(suspender_stack,
+         FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset));
   if (v8_flags.debug_code) {
     // -------------------------------------------
-    // Check that the suspender's continuation is the active continuation.
+    // Check that the suspender's stack is the active stack.
     // -------------------------------------------
     // TODO(thibaudm): Once we add core stack-switching instructions, this
-    // check will not hold anymore: it's possible that the active continuation
+    // check will not hold anymore: it's possible that the active stack
    // changed (due to an internal switch), so we have to update the suspender.
-    __ cmp(suspender_continuation, continuation);
+    __ cmp(suspender_stack, stack);
     Label ok;
     __ b(&ok, eq);
     __ Trap();
 
@@ -3323,13 +3288,8 @@
   // Update roots.
   // -------------------------------------------
   DEFINE_REG(caller);
-  __ LoadTaggedField(caller,
-                     FieldMemOperand(suspender_continuation,
-                                     WasmContinuationObject::kParentOffset));
-  int32_t active_continuation_offset =
-      MacroAssembler::RootRegisterOffsetForRootIndex(
-          RootIndex::kActiveContinuation);
-  __ str(caller, MemOperand(kRootRegister, active_continuation_offset));
+  __ ldr(caller, MemOperand(suspender_stack, wasm::kStackParentOffset));
+  __ StoreRootRelative(IsolateData::active_stack_offset(), caller);
   DEFINE_REG(parent);
   __ LoadTaggedField(
       parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
 
@@ -3337,16 +3297,13 @@
       MacroAssembler::RootRegisterOffsetForRootIndex(
           RootIndex::kActiveSuspender);
   __ str(parent, MemOperand(kRootRegister, active_suspender_offset));
-  regs.ResetExcept(suspender, caller, continuation);
+  regs.ResetExcept(suspender, caller, stack);
 
   // -------------------------------------------
   // Load jump buffer.
   // -------------------------------------------
-  SwitchStacks(masm, continuation, false, {caller, suspender});
-  FREE_REG(continuation);
-  ASSIGN_REG(jmpbuf);
-  __ ldr(jmpbuf, FieldMemOperand(caller, WasmContinuationObject::kStackOffset));
-  __ add(jmpbuf, jmpbuf, Operand(wasm::StackMemory::jmpbuf_offset()));
+  SwitchStacks(masm, stack, false, {caller, suspender});
+  FREE_REG(stack);
   __ LoadTaggedField(
       kReturnRegister0,
       FieldMemOperand(suspender, WasmSuspenderObject::kPromiseOffset));
 
@@ -3354,7 +3311,7 @@
       MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset);
   __ Zero(GCScanSlotPlace);
   ASSIGN_REG(scratch)
-  LoadJumpBuffer(masm, jmpbuf, true, scratch, wasm::JumpBuffer::Inactive);
+  LoadJumpBuffer(masm, caller, true, scratch, wasm::JumpBuffer::Inactive);
   if (v8_flags.debug_code) {
     __ Trap();
   }
 
@@ -3408,21 +3365,15 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   // Save current state.
   // -------------------------------------------
   Label suspend;
-  DEFINE_REG(active_continuation);
-  __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
-  DEFINE_REG(current_jmpbuf);
+  DEFINE_REG(active_stack);
+  __ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
   DEFINE_REG(scratch);
-  __ ldr(current_jmpbuf, FieldMemOperand(active_continuation,
-                                         WasmContinuationObject::kStackOffset));
-  __ add(current_jmpbuf, current_jmpbuf,
-         Operand(wasm::StackMemory::jmpbuf_offset()));
-  FillJumpBuffer(masm, current_jmpbuf, &suspend, scratch);
-  SwitchStackState(masm, current_jmpbuf, scratch, wasm::JumpBuffer::Active,
+  FillJumpBuffer(masm, active_stack, &suspend, scratch);
+  SwitchStackState(masm, active_stack, scratch, wasm::JumpBuffer::Active,
                    wasm::JumpBuffer::Inactive);
-  FREE_REG(current_jmpbuf);
 
   // -------------------------------------------
-  // Set the suspender and continuation parents and update the roots
+  // Set the suspender and stack parents and update the roots
   // -------------------------------------------
   DEFINE_REG(active_suspender);
   __ LoadRoot(active_suspender, RootIndex::kActiveSuspender);
 
@@ -3441,50 +3392,29 @@
   // the same register for target_continuation to use it in RecordWriteField.
   // So, free suspender here to use pinned reg, but load from it next line.
   FREE_REG(suspender);
-  DEFINE_PINNED(target_continuation, WriteBarrierDescriptor::ObjectRegister());
-  suspender = target_continuation;
-  __ LoadTaggedField(
-      target_continuation,
-      FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
+  DEFINE_REG(target_stack);
+  suspender = target_stack;
+  __ ldr(target_stack,
+         FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset));
   suspender = no_reg;
 
-  __ StoreTaggedField(active_continuation,
-                      FieldMemOperand(target_continuation,
-                                      WasmContinuationObject::kParentOffset));
-  DEFINE_REG(old_continuation);
-  __ Move(old_continuation, active_continuation);
-  __ RecordWriteField(
-      target_continuation, WasmContinuationObject::kParentOffset,
-      active_continuation, kLRHasBeenSaved, SaveFPRegsMode::kIgnore);
-  FREE_REG(active_continuation);
-  int32_t active_continuation_offset =
-      MacroAssembler::RootRegisterOffsetForRootIndex(
-          RootIndex::kActiveContinuation);
-  __ str(target_continuation,
-         MemOperand(kRootRegister, active_continuation_offset));
-
-  SwitchStacks(masm, old_continuation, false, {target_continuation});
-
-  regs.ResetExcept(target_continuation);
+  __ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
+  SwitchStacks(masm, active_stack, false, {target_stack});
+  regs.ResetExcept(target_stack);
 
   // -------------------------------------------
   // Load state from target jmpbuf (longjmp).
   // -------------------------------------------
   regs.Reserve(kReturnRegister0);
-  DEFINE_REG(target_jmpbuf);
   ASSIGN_REG(scratch);
-  __ ldr(target_jmpbuf, FieldMemOperand(target_continuation,
-                                        WasmContinuationObject::kStackOffset));
-  __ add(target_jmpbuf, target_jmpbuf,
-         Operand(wasm::StackMemory::jmpbuf_offset()));
   // Move resolved value to return register.
   __ ldr(kReturnRegister0, MemOperand(fp, 3 * kSystemPointerSize));
   MemOperand GCScanSlotPlace =
       MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset);
   __ Zero(GCScanSlotPlace);
   if (on_resume == wasm::OnResume::kThrow) {
-    // Switch to the continuation's stack without restoring the PC.
-    LoadJumpBuffer(masm, target_jmpbuf, false, scratch,
+    // Switch without restoring the PC.
+    LoadJumpBuffer(masm, target_stack, false, scratch,
                    wasm::JumpBuffer::Suspended);
     // Pop this frame now. The unwinder expects that the first STACK_SWITCH
     // frame is the outermost one.
 
@@ -3493,8 +3423,8 @@
     __ Push(kReturnRegister0);
     __ CallRuntime(Runtime::kThrow);
   } else {
-    // Resume the continuation normally.
-    LoadJumpBuffer(masm, target_jmpbuf, true, scratch,
+    // Resume the stack normally.
+    LoadJumpBuffer(masm, target_stack, true, scratch,
                    wasm::JumpBuffer::Suspended);
   }
   if (v8_flags.debug_code) {
 
@@ -3528,27 +3458,23 @@ void SwitchToAllocatedStack(MacroAssembler* masm, RegisterAllocator& regs,
                             Label* suspend) {
   ResetStackSwitchFrameStackSlots(masm);
   DEFINE_SCOPED(scratch)
-  DEFINE_REG(target_continuation)
-  __ LoadRoot(target_continuation, RootIndex::kActiveContinuation);
-  DEFINE_REG(parent_continuation)
-  __ LoadTaggedField(parent_continuation,
-                     FieldMemOperand(target_continuation,
-                                     WasmContinuationObject::kParentOffset));
-
-  SaveState(masm, parent_continuation, scratch, suspend);
-
-  SwitchStacks(masm, parent_continuation, false,
-               {wasm_instance, wrapper_buffer});
-
-  FREE_REG(parent_continuation);
+  DEFINE_REG(target_stack)
+  __ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
+  DEFINE_REG(parent_stack)
+  __ ldr(parent_stack, MemOperand(target_stack, wasm::kStackParentOffset));
+
+  FillJumpBuffer(masm, parent_stack, suspend, scratch);
+  SwitchStacks(masm, parent_stack, false, {wasm_instance, wrapper_buffer});
+
+  FREE_REG(parent_stack);
   // Save the old stack's fp in x9, and use it to access the parameters in
   // the parent frame.
   regs.Pinned(r9, &original_fp);
   __ Move(original_fp, fp);
-  __ LoadRoot(target_continuation, RootIndex::kActiveContinuation);
-  LoadTargetJumpBuffer(masm, target_continuation, scratch,
+  __ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
+  LoadTargetJumpBuffer(masm, target_stack, scratch,
                        wasm::JumpBuffer::Suspended);
-  FREE_REG(target_continuation);
+  FREE_REG(target_stack);
 
   // Push the loaded fp. We know it is null, because there is no frame yet,
   // so we could also push 0 directly. In any case we need to push it,
 
@@ -3629,9 +3555,9 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm, RegisterAllocator& regs,
       MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
   GetContextFromImplicitArg(masm, kContextRegister, tmp);
 
-  ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp,
-                           tmp2, tmp3);
-  RestoreParentSuspender(masm, tmp, tmp2);
+  ReloadParentStack(masm, promise, return_value, kContextRegister, tmp, tmp2,
+                    tmp3);
+  RestoreParentSuspender(masm, tmp);
 
   if (mode == wasm::kPromise) {
     __ Move(tmp, Operand(1));
 
@@ -3678,9 +3604,8 @@ void GenerateExceptionHandlingLandingPad(MacroAssembler* masm,
   __ ldr(kContextRegister,
          MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
   GetContextFromImplicitArg(masm, kContextRegister, tmp);
-  ReloadParentContinuation(masm, promise, reason, kContextRegister, tmp, tmp2,
-                           tmp3);
-  RestoreParentSuspender(masm, tmp, tmp2);
+  ReloadParentStack(masm, promise, reason, kContextRegister, tmp, tmp2, tmp3);
+  RestoreParentSuspender(masm, tmp);
 
   __ Move(tmp, Operand(1));
   __ str(tmp,
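Throughout these wasm stack-switching builtins, each stack carries a small
save area that the code addresses through kStackSpOffset, kStackFpOffset,
kStackPcOffset, kStackLimitOffset, kStackStateOffset and kStackParentOffset.
A conceptual sketch of those fields (illustrative only, not V8's actual
definitions or layout):

#include <cstdint>

enum class StackState { Active, Suspended, Inactive, Retired };

struct StackMemory {
  uintptr_t sp;         // saved stack pointer (kStackSpOffset)
  uintptr_t fp;         // saved frame pointer (kStackFpOffset)
  uintptr_t pc;         // resume address (kStackPcOffset)
  uintptr_t limit;      // stack limit for overflow checks (kStackLimitOffset)
  StackState state;     // kStackStateOffset
  StackMemory* parent;  // kStackParentOffset: the stack to return to
};

// FillJumpBuffer stores sp/fp/limit and a resume label into these fields;
// LoadJumpBuffer restores them (optionally jumping to pc) after checking the
// expected state and marking the stack Active.
int main() { return 0; }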
276
deps/v8/src/builtins/arm64/builtins-arm64.cc
vendored
@@ -3473,12 +3473,11 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
 namespace {
 // Check that the stack was in the old state (if generated code assertions are
 // enabled), and switch to the new state.
-void SwitchStackState(MacroAssembler* masm, Register jmpbuf,
-                      Register tmp,
+void SwitchStackState(MacroAssembler* masm, Register stack, Register tmp,
                       wasm::JumpBuffer::StackState old_state,
                       wasm::JumpBuffer::StackState new_state) {
 #if V8_ENABLE_SANDBOX
-  __ Ldr(tmp.W(), MemOperand(jmpbuf, wasm::kJmpBufStateOffset));
+  __ Ldr(tmp.W(), MemOperand(stack, wasm::kStackStateOffset));
   __ Cmp(tmp.W(), old_state);
   Label ok;
   __ B(&ok, eq);
 
@@ -3486,7 +3485,7 @@ void SwitchStackState(MacroAssembler* masm, Register jmpbuf,
   __ bind(&ok);
 #endif
   __ Mov(tmp.W(), new_state);
-  __ Str(tmp.W(), MemOperand(jmpbuf, wasm::kJmpBufStateOffset));
+  __ Str(tmp.W(), MemOperand(stack, wasm::kStackStateOffset));
 }
 
 // Switch the stack pointer. Also switch the simulator's stack limit when
 
@@ -3494,77 +3493,55 @@ void SwitchStackState(MacroAssembler* masm, Register jmpbuf,
 // changing the stack pointer, as a mismatch between the stack pointer and the
 // simulator's stack limit can cause stack access check failures.
 void SwitchStackPointerAndSimulatorStackLimit(MacroAssembler* masm,
-                                              Register jmpbuf, Register tmp) {
+                                              Register stack, Register tmp) {
   if (masm->options().enable_simulator_code) {
     UseScratchRegisterScope temps(masm);
     temps.Exclude(x16);
-    __ Ldr(tmp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
-    __ Ldr(x16, MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset));
+    __ Ldr(tmp, MemOperand(stack, wasm::kStackSpOffset));
+    __ Ldr(x16, MemOperand(stack, wasm::kStackLimitOffset));
     __ Mov(sp, tmp);
     __ hlt(kImmExceptionIsSwitchStackLimit);
   } else {
-    __ Ldr(tmp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
+    __ Ldr(tmp, MemOperand(stack, wasm::kStackSpOffset));
     __ Mov(sp, tmp);
   }
 }
 
-void FillJumpBuffer(MacroAssembler* masm, Register jmpbuf, Label* pc,
+void FillJumpBuffer(MacroAssembler* masm, Register stack, Label* pc,
                     Register tmp) {
   __ Mov(tmp, sp);
-  __ Str(tmp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
-  __ Str(fp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
+  __ Str(tmp, MemOperand(stack, wasm::kStackSpOffset));
+  __ Str(fp, MemOperand(stack, wasm::kStackFpOffset));
   __ LoadStackLimit(tmp, StackLimitKind::kRealStackLimit);
-  __ Str(tmp, MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset));
+  __ Str(tmp, MemOperand(stack, wasm::kStackLimitOffset));
   __ Adr(tmp, pc);
-  __ Str(tmp, MemOperand(jmpbuf, wasm::kJmpBufPcOffset));
+  __ Str(tmp, MemOperand(stack, wasm::kStackPcOffset));
 }
 
-void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf, bool load_pc,
+void LoadJumpBuffer(MacroAssembler* masm, Register stack, bool load_pc,
                     Register tmp, wasm::JumpBuffer::StackState expected_state) {
-  SwitchStackPointerAndSimulatorStackLimit(masm, jmpbuf, tmp);
-  __ Ldr(fp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
-  SwitchStackState(masm, jmpbuf, tmp, expected_state, wasm::JumpBuffer::Active);
+  SwitchStackPointerAndSimulatorStackLimit(masm, stack, tmp);
+  __ Ldr(fp, MemOperand(stack, wasm::kStackFpOffset));
+  SwitchStackState(masm, stack, tmp, expected_state, wasm::JumpBuffer::Active);
   if (load_pc) {
-    __ Ldr(tmp, MemOperand(jmpbuf, wasm::kJmpBufPcOffset));
+    __ Ldr(tmp, MemOperand(stack, wasm::kStackPcOffset));
     __ Br(tmp);
   }
   // The stack limit in StackGuard is set separately under the ExecutionAccess
   // lock.
 }
 
-void SaveState(MacroAssembler* masm, Register active_continuation,
-               Register tmp, Label* suspend) {
-  Register jmpbuf = tmp;
-  __ LoadExternalPointerField(
-      jmpbuf,
-      FieldMemOperand(active_continuation,
-                      WasmContinuationObject::kStackOffset),
-      kWasmStackMemoryTag);
-  __ Add(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
-  UseScratchRegisterScope temps(masm);
-  Register scratch = temps.AcquireX();
-  FillJumpBuffer(masm, jmpbuf, suspend, scratch);
-}
-
-void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_continuation,
+void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_stack,
                           Register tmp,
                           wasm::JumpBuffer::StackState expected_state) {
-  Register target_jmpbuf = target_continuation;
-  __ LoadExternalPointerField(
-      target_jmpbuf,
-      FieldMemOperand(target_continuation,
-                      WasmContinuationObject::kStackOffset),
-      kWasmStackMemoryTag);
-  __ Add(target_jmpbuf, target_jmpbuf, wasm::StackMemory::jmpbuf_offset());
   __ Str(xzr,
          MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
   // Switch stack!
-  LoadJumpBuffer(masm, target_jmpbuf, false, tmp, expected_state);
+  LoadJumpBuffer(masm, target_stack, false, tmp, expected_state);
 }
 
 // Updates the stack limit and central stack info, and validates the switch.
-void SwitchStacks(MacroAssembler* masm, Register old_continuation,
-                  bool return_switch,
+void SwitchStacks(MacroAssembler* masm, Register old_stack, bool return_switch,
                   const std::initializer_list<CPURegister> keep) {
   using ER = ExternalReference;
   for (size_t i = 0; i < (keep.size() & ~0x1); i += 2) {
 
@@ -3575,8 +3552,9 @@ void SwitchStacks(MacroAssembler* masm, Register old_continuation,
   }
   {
     FrameScope scope(masm, StackFrame::MANUAL);
+    // Move {old_stack} first in case it aliases kCArgRegs[0].
+    __ Mov(kCArgRegs[1], old_stack);
     __ Mov(kCArgRegs[0], ExternalReference::isolate_address(masm->isolate()));
-    __ Mov(kCArgRegs[1], old_continuation);
     __ CallCFunction(
         return_switch ? ER::wasm_return_switch() : ER::wasm_switch_stacks(), 2);
   }
 
@@ -3588,52 +3566,34 @@ void SwitchStacks(MacroAssembler* masm, Register old_continuation,
   }
 }
 
-void ReloadParentContinuation(MacroAssembler* masm, Register return_reg,
-                              Register return_value, Register context,
-                              Register tmp1, Register tmp2, Register tmp3) {
-  Register active_continuation = tmp1;
-  __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
+void ReloadParentStack(MacroAssembler* masm, Register return_reg,
+                       Register return_value, Register context, Register tmp1,
+                       Register tmp2, Register tmp3) {
+  Register active_stack = tmp1;
+  __ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
 
   // Set a null pointer in the jump buffer's SP slot to indicate to the stack
   // frame iterator that this stack is empty.
-  Register jmpbuf = tmp2;
-  __ LoadExternalPointerField(
-      jmpbuf,
-      FieldMemOperand(active_continuation,
-                      WasmContinuationObject::kStackOffset),
-      kWasmStackMemoryTag);
-  __ Add(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
-  __ Str(xzr, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
+  __ Str(xzr, MemOperand(active_stack, wasm::kStackSpOffset));
|
||||
{
|
||||
UseScratchRegisterScope temps(masm);
|
||||
Register scratch = temps.AcquireX();
|
||||
SwitchStackState(masm, jmpbuf, scratch, wasm::JumpBuffer::Active,
|
||||
SwitchStackState(masm, active_stack, scratch, wasm::JumpBuffer::Active,
|
||||
wasm::JumpBuffer::Retired);
|
||||
}
|
||||
Register parent = tmp2;
|
||||
__ LoadTaggedField(parent,
|
||||
FieldMemOperand(active_continuation,
|
||||
WasmContinuationObject::kParentOffset));
|
||||
__ Ldr(parent, MemOperand(active_stack, wasm::kStackParentOffset));
|
||||
|
||||
// Update active continuation root.
|
||||
int32_t active_continuation_offset =
|
||||
MacroAssembler::RootRegisterOffsetForRootIndex(
|
||||
RootIndex::kActiveContinuation);
|
||||
__ Str(parent, MemOperand(kRootRegister, active_continuation_offset));
|
||||
jmpbuf = parent;
|
||||
__ LoadExternalPointerField(
|
||||
jmpbuf, FieldMemOperand(parent, WasmContinuationObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
__ Add(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
|
||||
// Update active stack.
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), parent);
|
||||
|
||||
// Switch stack!
|
||||
SwitchStacks(masm, active_continuation, true,
|
||||
{return_reg, return_value, context, jmpbuf});
|
||||
LoadJumpBuffer(masm, jmpbuf, false, tmp3, wasm::JumpBuffer::Inactive);
|
||||
SwitchStacks(masm, active_stack, true,
|
||||
{return_reg, return_value, context, parent});
|
||||
LoadJumpBuffer(masm, parent, false, tmp3, wasm::JumpBuffer::Inactive);
|
||||
}
|
||||
|
||||
void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
|
||||
Register tmp2) {
|
||||
void RestoreParentSuspender(MacroAssembler* masm, Register tmp1) {
|
||||
Register suspender = tmp1;
|
||||
__ LoadRoot(suspender, RootIndex::kActiveSuspender);
|
||||
__ LoadTaggedField(
|
||||
|
@ -3840,32 +3800,27 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
// Save current state in active jump buffer.
|
||||
// -------------------------------------------
|
||||
Label resume;
|
||||
DEFINE_REG(continuation);
|
||||
__ LoadRoot(continuation, RootIndex::kActiveContinuation);
|
||||
DEFINE_REG(jmpbuf);
|
||||
DEFINE_REG(stack);
|
||||
__ LoadRootRelative(stack, IsolateData::active_stack_offset());
|
||||
DEFINE_REG(scratch);
|
||||
__ LoadExternalPointerField(
|
||||
jmpbuf,
|
||||
FieldMemOperand(continuation, WasmContinuationObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
__ Add(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
|
||||
FillJumpBuffer(masm, jmpbuf, &resume, scratch);
|
||||
SwitchStackState(masm, jmpbuf, scratch, wasm::JumpBuffer::Active,
|
||||
FillJumpBuffer(masm, stack, &resume, scratch);
|
||||
SwitchStackState(masm, stack, scratch, wasm::JumpBuffer::Active,
|
||||
wasm::JumpBuffer::Suspended);
|
||||
regs.ResetExcept(suspender, continuation);
|
||||
regs.ResetExcept(suspender, stack);
|
||||
|
||||
DEFINE_REG(suspender_continuation);
|
||||
__ LoadTaggedField(
|
||||
suspender_continuation,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
|
||||
DEFINE_REG(suspender_stack);
|
||||
__ LoadExternalPointerField(
|
||||
suspender_stack,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
if (v8_flags.debug_code) {
|
||||
// -------------------------------------------
|
||||
// Check that the suspender's continuation is the active continuation.
|
||||
// Check that the suspender's stack is the active stack.
|
||||
// -------------------------------------------
|
||||
// TODO(thibaudm): Once we add core stack-switching instructions, this
|
||||
// check will not hold anymore: it's possible that the active continuation
|
||||
// changed (due to an internal switch), so we have to update the suspender.
|
||||
__ cmp(suspender_continuation, continuation);
|
||||
// check will not hold anymore: it's possible that the active stack changed
|
||||
// (due to an internal switch), so we have to update the suspender.
|
||||
__ cmp(suspender_stack, stack);
|
||||
Label ok;
|
||||
__ B(&ok, eq);
|
||||
__ Trap();
|
||||
|
@ -3875,13 +3830,8 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
// Update roots.
|
||||
// -------------------------------------------
|
||||
DEFINE_REG(caller);
|
||||
__ LoadTaggedField(caller,
|
||||
FieldMemOperand(suspender_continuation,
|
||||
WasmContinuationObject::kParentOffset));
|
||||
int32_t active_continuation_offset =
|
||||
MacroAssembler::RootRegisterOffsetForRootIndex(
|
||||
RootIndex::kActiveContinuation);
|
||||
__ Str(caller, MemOperand(kRootRegister, active_continuation_offset));
|
||||
__ Ldr(caller, MemOperand(suspender_stack, wasm::kStackParentOffset));
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), caller);
|
||||
DEFINE_REG(parent);
|
||||
__ LoadTaggedField(
|
||||
parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
|
||||
|
@ -3889,18 +3839,13 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
MacroAssembler::RootRegisterOffsetForRootIndex(
|
||||
RootIndex::kActiveSuspender);
|
||||
__ Str(parent, MemOperand(kRootRegister, active_suspender_offset));
|
||||
regs.ResetExcept(suspender, caller, continuation);
|
||||
regs.ResetExcept(suspender, caller, stack);
|
||||
|
||||
// -------------------------------------------
|
||||
// Load jump buffer.
|
||||
// -------------------------------------------
|
||||
SwitchStacks(masm, continuation, false, {caller, suspender});
|
||||
FREE_REG(continuation);
|
||||
ASSIGN_REG(jmpbuf);
|
||||
__ LoadExternalPointerField(
|
||||
jmpbuf, FieldMemOperand(caller, WasmContinuationObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
__ Add(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
|
||||
SwitchStacks(masm, stack, false, {caller, suspender});
|
||||
FREE_REG(stack);
|
||||
__ LoadTaggedField(
|
||||
kReturnRegister0,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kPromiseOffset));
|
||||
|
@ -3908,7 +3853,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||
MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset);
|
||||
__ Str(xzr, GCScanSlotPlace);
|
||||
ASSIGN_REG(scratch)
|
||||
LoadJumpBuffer(masm, jmpbuf, true, scratch, wasm::JumpBuffer::Inactive);
|
||||
LoadJumpBuffer(masm, caller, true, scratch, wasm::JumpBuffer::Inactive);
|
||||
__ Trap();
|
||||
__ Bind(&resume, BranchTargetIdentifier::kBtiJump);
|
||||
__ LeaveFrame(StackFrame::STACK_SWITCH);
|
||||
|
@ -3962,23 +3907,15 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
// Save current state.
|
||||
// -------------------------------------------
|
||||
Label suspend;
|
||||
DEFINE_REG(active_continuation);
|
||||
__ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
|
||||
DEFINE_REG(current_jmpbuf);
|
||||
DEFINE_REG(active_stack);
|
||||
__ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
|
||||
DEFINE_REG(scratch);
|
||||
__ LoadExternalPointerField(
|
||||
current_jmpbuf,
|
||||
FieldMemOperand(active_continuation,
|
||||
WasmContinuationObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
__ Add(current_jmpbuf, current_jmpbuf, wasm::StackMemory::jmpbuf_offset());
|
||||
FillJumpBuffer(masm, current_jmpbuf, &suspend, scratch);
|
||||
SwitchStackState(masm, current_jmpbuf, scratch, wasm::JumpBuffer::Active,
|
||||
FillJumpBuffer(masm, active_stack, &suspend, scratch);
|
||||
SwitchStackState(masm, active_stack, scratch, wasm::JumpBuffer::Active,
|
||||
wasm::JumpBuffer::Inactive);
|
||||
FREE_REG(current_jmpbuf);
|
||||
|
||||
// -------------------------------------------
|
||||
// Set the suspender and continuation parents and update the roots
|
||||
// Set the suspender and stack parents and update the roots
|
||||
// -------------------------------------------
|
||||
DEFINE_REG(active_suspender);
|
||||
__ LoadRoot(active_suspender, RootIndex::kActiveSuspender);
|
||||
|
@ -3993,56 +3930,32 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
RootIndex::kActiveSuspender);
|
||||
__ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
|
||||
|
||||
// Next line we are going to load a field from suspender, but we have to use
|
||||
// the same register for target_continuation to use it in RecordWriteField.
|
||||
// So, free suspender here to use pinned reg, but load from it next line.
|
||||
DEFINE_REG(target_stack);
|
||||
__ LoadExternalPointerField(
|
||||
target_stack,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
FREE_REG(suspender);
|
||||
DEFINE_PINNED(target_continuation, WriteBarrierDescriptor::ObjectRegister());
|
||||
suspender = target_continuation;
|
||||
__ LoadTaggedField(
|
||||
target_continuation,
|
||||
FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
|
||||
suspender = no_reg;
|
||||
|
||||
__ StoreTaggedField(
|
||||
active_continuation,
|
||||
FieldMemOperand(target_continuation,
|
||||
WasmContinuationObject::kParentOffset));
|
||||
DEFINE_REG(old_continuation);
|
||||
__ Move(old_continuation, active_continuation);
|
||||
__ RecordWriteField(
|
||||
target_continuation, WasmContinuationObject::kParentOffset,
|
||||
active_continuation, kLRHasBeenSaved, SaveFPRegsMode::kIgnore);
|
||||
int32_t active_continuation_offset =
|
||||
MacroAssembler::RootRegisterOffsetForRootIndex(
|
||||
RootIndex::kActiveContinuation);
|
||||
__ Str(target_continuation,
|
||||
MemOperand(kRootRegister, active_continuation_offset));
|
||||
__ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
|
||||
|
||||
SwitchStacks(masm, old_continuation, false, {target_continuation});
|
||||
SwitchStacks(masm, active_stack, false, {target_stack});
|
||||
|
||||
regs.ResetExcept(target_continuation);
|
||||
regs.ResetExcept(target_stack);
|
||||
|
||||
// -------------------------------------------
|
||||
// Load state from target jmpbuf (longjmp).
|
||||
// -------------------------------------------
|
||||
regs.Reserve(kReturnRegister0);
|
||||
DEFINE_REG(target_jmpbuf);
|
||||
ASSIGN_REG(scratch);
|
||||
__ LoadExternalPointerField(
|
||||
target_jmpbuf,
|
||||
FieldMemOperand(target_continuation,
|
||||
WasmContinuationObject::kStackOffset),
|
||||
kWasmStackMemoryTag);
|
||||
__ Add(target_jmpbuf, target_jmpbuf, wasm::StackMemory::jmpbuf_offset());
|
||||
// Move resolved value to return register.
|
||||
__ Ldr(kReturnRegister0, MemOperand(fp, 3 * kSystemPointerSize));
|
||||
MemOperand GCScanSlotPlace =
|
||||
MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset);
|
||||
__ Str(xzr, GCScanSlotPlace);
|
||||
if (on_resume == wasm::OnResume::kThrow) {
|
||||
// Switch to the continuation's stack without restoring the PC.
|
||||
LoadJumpBuffer(masm, target_jmpbuf, false, scratch,
|
||||
// Switch without restoring the PC.
|
||||
LoadJumpBuffer(masm, target_stack, false, scratch,
|
||||
wasm::JumpBuffer::Suspended);
|
||||
// Pop this frame now. The unwinder expects that the first STACK_SWITCH
|
||||
// frame is the outermost one.
|
||||
|
@ -4051,8 +3964,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
|
|||
__ Push(xzr, kReturnRegister0);
|
||||
__ CallRuntime(Runtime::kThrow);
|
||||
} else {
|
||||
// Resume the continuation normally.
|
||||
LoadJumpBuffer(masm, target_jmpbuf, true, scratch,
|
||||
// Resume the stack normally.
|
||||
LoadJumpBuffer(masm, target_stack, true, scratch,
|
||||
wasm::JumpBuffer::Suspended);
|
||||
}
|
||||
__ Trap();
|
||||
|
@ -4083,24 +3996,21 @@ void SwitchToAllocatedStack(MacroAssembler* masm, RegisterAllocator& regs,
|
|||
Label* suspend) {
|
||||
ResetStackSwitchFrameStackSlots(masm);
|
||||
DEFINE_SCOPED(scratch)
|
||||
DEFINE_REG(target_continuation)
|
||||
__ LoadRoot(target_continuation, RootIndex::kActiveContinuation);
|
||||
DEFINE_REG(parent_continuation)
|
||||
__ LoadTaggedField(parent_continuation,
|
||||
FieldMemOperand(target_continuation,
|
||||
WasmContinuationObject::kParentOffset));
|
||||
SaveState(masm, parent_continuation, scratch, suspend);
|
||||
SwitchStacks(masm, parent_continuation, false,
|
||||
{wasm_instance, wrapper_buffer});
|
||||
FREE_REG(parent_continuation);
|
||||
DEFINE_REG(parent_stack)
|
||||
__ LoadRootRelative(parent_stack, IsolateData::active_stack_offset());
|
||||
__ Ldr(parent_stack, MemOperand(parent_stack, wasm::kStackParentOffset));
|
||||
FillJumpBuffer(masm, parent_stack, suspend, scratch);
|
||||
SwitchStacks(masm, parent_stack, false, {wasm_instance, wrapper_buffer});
|
||||
FREE_REG(parent_stack);
|
||||
// Save the old stack's fp in x9, and use it to access the parameters in
|
||||
// the parent frame.
|
||||
regs.Pinned(x9, &original_fp);
|
||||
__ Mov(original_fp, fp);
|
||||
__ LoadRoot(target_continuation, RootIndex::kActiveContinuation);
|
||||
LoadTargetJumpBuffer(masm, target_continuation, scratch,
|
||||
DEFINE_REG(target_stack);
|
||||
__ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
|
||||
LoadTargetJumpBuffer(masm, target_stack, scratch,
|
||||
wasm::JumpBuffer::Suspended);
|
||||
FREE_REG(target_continuation);
|
||||
FREE_REG(target_stack);
|
||||
// Push the loaded fp. We know it is null, because there is no frame yet,
|
||||
// so we could also push 0 directly. In any case we need to push it,
|
||||
// because this marks the base of the stack segment for
|
||||
|
@ -4158,9 +4068,9 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm, RegisterAllocator& regs,
|
|||
MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
|
||||
GetContextFromImplicitArg(masm, kContextRegister, tmp);
|
||||
|
||||
ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp,
|
||||
tmp2, tmp3);
|
||||
RestoreParentSuspender(masm, tmp, tmp2);
|
||||
ReloadParentStack(masm, promise, return_value, kContextRegister, tmp, tmp2,
|
||||
tmp3);
|
||||
RestoreParentSuspender(masm, tmp);
|
||||
|
||||
if (mode == wasm::kPromise) {
|
||||
__ Mov(tmp, 1);
|
||||
|
@ -4208,9 +4118,8 @@ void GenerateExceptionHandlingLandingPad(MacroAssembler* masm,
|
|||
DEFINE_SCOPED(tmp2);
|
||||
DEFINE_SCOPED(tmp3);
|
||||
GetContextFromImplicitArg(masm, kContextRegister, tmp);
|
||||
ReloadParentContinuation(masm, promise, reason, kContextRegister, tmp, tmp2,
|
||||
tmp3);
|
||||
RestoreParentSuspender(masm, tmp, tmp2);
|
||||
ReloadParentStack(masm, promise, reason, kContextRegister, tmp, tmp2, tmp3);
|
||||
RestoreParentSuspender(masm, tmp);
|
||||
|
||||
__ Mov(tmp, 1);
|
||||
__ Str(tmp,
|
||||
|
@ -4718,10 +4627,17 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
|
|||
__ Ldr(cp, MemOperand(cp));
|
||||
{
|
||||
UseScratchRegisterScope temps(masm);
|
||||
Register scratch = temps.AcquireX();
|
||||
temps.Exclude(x16);
|
||||
Register scratch = x1;
|
||||
__ Mov(scratch, ER::Create(IsolateAddressId::kPendingHandlerSPAddress,
|
||||
masm->isolate()));
|
||||
__ Ldr(scratch, MemOperand(scratch));
|
||||
if (masm->options().enable_simulator_code) {
|
||||
// Update the simulator stack limit in case the exception was caught in a
|
||||
// different stack.
|
||||
__ LoadStackLimit(x16, StackLimitKind::kRealStackLimit);
|
||||
__ hlt(kImmExceptionIsSwitchStackLimit);
|
||||
}
|
||||
__ Mov(sp, scratch);
|
||||
}
|
||||
__ Mov(fp, ER::Create(IsolateAddressId::kPendingHandlerFPAddress,
|
||||
|
|
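The hunks above retire the per-continuation jump buffer: state, sp, fp, pc, and stack limit are now read straight off the wasm stack memory (the kStack*Offset slots) instead of a jmpbuf reached through WasmContinuationObject. The core invariant is unchanged, though: a switch first validates the stack's recorded state against the expected one, then writes the new state. A minimal stand-alone C++ model of that check follows; the Stack struct, its field set, and the enum values are illustrative, not V8's actual wasm::StackMemory layout.

// A minimal stand-alone model of the SwitchStackState check generated above;
// the Stack struct, field set, and enum values are illustrative, not V8's
// actual wasm::StackMemory layout.
#include <cassert>
#include <cstdio>

namespace model {

enum class StackState { Active, Suspended, Inactive, Retired };

struct Stack {
  void* sp = nullptr;     // saved stack pointer   (kStackSpOffset)
  void* fp = nullptr;     // saved frame pointer   (kStackFpOffset)
  void* pc = nullptr;     // saved program counter (kStackPcOffset)
  void* limit = nullptr;  // stack limit           (kStackLimitOffset)
  StackState state = StackState::Inactive;  // kStackStateOffset
};

// Mirrors the generated code: load the current state, trap if it is not the
// expected one, then store the new state.
void SwitchStackState(Stack* stack, StackState old_state,
                      StackState new_state) {
  assert(stack->state == old_state && "stack in unexpected state");
  stack->state = new_state;
}

}  // namespace model

int main() {
  model::Stack stack;
  stack.state = model::StackState::Active;
  model::SwitchStackState(&stack, model::StackState::Active,
                          model::StackState::Suspended);
  std::printf("state=%d\n", static_cast<int>(stack.state));
  return 0;
}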
deps/v8/src/builtins/base.tq (4 changes, vendored)

@@ -274,6 +274,7 @@ extern class NameToIndexHashTable extends HashTable;
 extern class RegisteredSymbolTable extends HashTable;
 extern class NameDictionary extends HashTable;
 extern class GlobalDictionary extends HashTable;
+extern class SimpleNameDictionary extends HashTable;
 extern class SimpleNumberDictionary extends HashTable;
 extern class EphemeronHashTable extends HashTable;
 type ObjectHashTable extends HashTable
@@ -1315,6 +1316,7 @@ extern macro IsInteger(JSAny): bool;
 extern macro IsInteger(HeapNumber): bool;
 
 extern macro AllocateHeapNumberWithValue(float64): HeapNumber;
+extern macro AllocateContextCell(Object): ContextCell;
 extern macro ChangeInt32ToTagged(int32): Number;
 extern macro ChangeUint32ToTagged(uint32): Number;
 extern macro ChangeUintPtrToFloat64(uintptr): float64;
@@ -1462,8 +1464,6 @@ extern macro IsTypedArraySpeciesProtectorCellInvalid(): bool;
 extern macro IsPromiseSpeciesProtectorCellInvalid(): bool;
 extern macro IsMockArrayBufferAllocatorFlag(): bool;
 extern macro HasBuiltinSubclassingFlag(): bool;
-extern macro IsScriptContextMutableHeapNumberFlag(): bool;
-extern macro IsScriptContextMutableHeapInt32Flag(): bool;
 extern macro IsPrototypeTypedArrayPrototype(
     implicit context: Context)(Map): bool;
 extern macro IsSetIteratorProtectorCellInvalid(): bool;
deps/v8/src/builtins/builtins-array-gen.cc (5 changes, vendored)

@@ -551,7 +551,7 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
     {
       Label allocate_js_array(this);
 
-      TNode<Map> array_map = CAST(LoadContextElement(
+      TNode<Map> array_map = CAST(LoadContextElementNoCell(
           context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));
 
       TNode<IntPtrT> capacity = IntPtrConstant(0);
@@ -1687,7 +1687,8 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument(
     // Make elements kind holey and update elements kind in the type info.
     var_elements_kind = Word32Or(var_elements_kind.value(), Int32Constant(1));
     StoreObjectFieldNoWriteBarrier(
-        *allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset,
+        *allocation_site,
+        offsetof(AllocationSite, transition_info_or_boilerplate_),
        SmiOr(transition_info, SmiConstant(fast_elements_kind_holey_mask)));
     Goto(&normal_sequence);
   }
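The second hunk replaces the hand-maintained AllocationSite::kTransitionInfoOrBoilerplateOffset constant with offsetof on the member itself. A tiny stand-alone illustration of why offsetof is the safer spelling; the struct below is hypothetical, not V8's AllocationSite.

// Hypothetical object layout, not V8's AllocationSite; shows why
// offsetof(Type, field) tracks the real layout where a hand-maintained
// kFieldOffset constant can silently drift.
#include <cstddef>
#include <cstdio>

struct AllocationSiteModel {
  void* map;
  long transition_info_or_boilerplate_;
  void* nested_site;
};

int main() {
  // Recomputed by the compiler from the definition above; reordering or
  // resizing earlier fields can never leave this value stale.
  std::printf("offset = %zu\n",
              offsetof(AllocationSiteModel, transition_info_or_boilerplate_));
  return 0;
}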
deps/v8/src/builtins/builtins-array.cc (35 changes, vendored)

@@ -95,7 +95,7 @@ void MatchArrayElementsKindToArguments(Isolate* isolate,
     // Use a short-lived HandleScope to avoid creating several copies of the
     // elements handle which would cause issues when left-trimming later-on.
     HandleScope scope(isolate);
-    JSObject::TransitionElementsKind(array, target_kind);
+    JSObject::TransitionElementsKind(isolate, array, target_kind);
   }
 }
 
@@ -185,7 +185,7 @@ V8_WARN_UNUSED_RESULT MaybeDirectHandle<Object> SetLengthProperty(
     if (!JSArray::HasReadOnlyLength(array)) {
       DCHECK_LE(length, kMaxUInt32);
       MAYBE_RETURN_NULL(
-          JSArray::SetLength(array, static_cast<uint32_t>(length)));
+          JSArray::SetLength(isolate, array, static_cast<uint32_t>(length)));
       return receiver;
     }
   }
@@ -235,7 +235,7 @@ V8_WARN_UNUSED_RESULT MaybeDirectHandle<Map> GetReplacedElementsKindsMap(
   Tagged<Context> native_context = map->map()->native_context();
   if (native_context->GetInitialJSArrayMap(origin_kind) == map) {
     Tagged<Object> maybe_target_map =
-        native_context->get(Context::ArrayMapIndex(target_kind));
+        native_context->GetNoCell(Context::ArrayMapIndex(target_kind));
     if (Tagged<Map> target_map; TryCast<Map>(maybe_target_map, &target_map)) {
       map->NotifyLeafMapLayoutChange(isolate);
       return direct_handle(target_map, isolate);
@@ -323,7 +323,7 @@ V8_WARN_UNUSED_RESULT bool TryFastArrayFill(
       if (IsMoreGeneralElementsKindTransition(origin_kind, target_kind)) {
         // Transition through the allocation site as well if present, but
         // only if this is a forward transition.
-        JSObject::UpdateAllocationSite(array, target_kind);
+        JSObject::UpdateAllocationSite(isolate, array, target_kind);
       }
       did_transition_map = true;
     }
@@ -331,12 +331,12 @@ V8_WARN_UNUSED_RESULT bool TryFastArrayFill(
 
     if (!did_transition_map) {
       target_kind = GetMoreGeneralElementsKind(origin_kind, target_kind);
-      JSObject::TransitionElementsKind(array, target_kind);
+      JSObject::TransitionElementsKind(isolate, array, target_kind);
     }
   }
 
   ElementsAccessor* accessor = array->GetElementsAccessor();
-  accessor->Fill(array, value, start, end).Check();
+  accessor->Fill(isolate, array, value, start, end).Check();
 
   // It's possible the JSArray's 'length' property was assigned to after the
   // length was loaded due to user code during argument coercion of the start
@@ -347,7 +347,7 @@ V8_WARN_UNUSED_RESULT bool TryFastArrayFill(
   // need to ensure the JSArray's length is correctly set in case the user
   // assigned a smaller value.
   if (Object::NumberValue(array->length()) < end) {
-    CHECK(accessor->SetLength(array, end).FromJust());
+    CHECK(accessor->SetLength(isolate, array, end).FromJust());
   }
 
   return true;
@@ -496,7 +496,7 @@ BUILTIN(ArrayPush) {
   ElementsAccessor* accessor = array->GetElementsAccessor();
   uint32_t new_length;
   MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, new_length, accessor->Push(array, &args, to_add));
+      isolate, new_length, accessor->Push(isolate, array, &args, to_add));
   return *isolate->factory()->NewNumberFromUint((new_length));
 }
 
@@ -579,7 +579,7 @@ BUILTIN(ArrayPop) {
   if (IsJSArrayFastElementMovingAllowed(isolate, *array)) {
     // Fast Elements Path
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, array->GetElementsAccessor()->Pop(array));
+        isolate, result, array->GetElementsAccessor()->Pop(isolate, array));
   } else {
     // Use Slow Lookup otherwise
     uint32_t new_length = len - 1;
@@ -596,7 +596,7 @@ BUILTIN(ArrayPop) {
     }
     bool set_len_ok;
     MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, set_len_ok, JSArray::SetLength(array, new_length));
+        isolate, set_len_ok, JSArray::SetLength(isolate, array, new_length));
   }
 
   return *result;
@@ -704,8 +704,8 @@ BUILTIN(ArrayShift) {
 
   if (CanUseFastArrayShift(isolate, receiver)) {
     DirectHandle<JSArray> array = Cast<JSArray>(receiver);
-    RETURN_RESULT_OR_FAILURE(isolate,
-                             array->GetElementsAccessor()->Shift(array));
+    RETURN_RESULT_OR_FAILURE(
+        isolate, array->GetElementsAccessor()->Shift(isolate, array));
   }
 
   return GenericArrayShift(isolate, receiver, length);
@@ -735,7 +735,7 @@ BUILTIN(ArrayUnshift) {
   ElementsAccessor* accessor = array->GetElementsAccessor();
   uint32_t new_length;
   MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, new_length, accessor->Unshift(array, &args, to_add));
+      isolate, new_length, accessor->Unshift(isolate, array, &args, to_add));
   return Smi::FromInt(new_length);
 }
 
@@ -855,7 +855,8 @@ class ArrayConcatVisitor {
     DirectHandle<Number> length =
         isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
     DirectHandle<Map> map = JSObject::GetElementsTransitionMap(
-        array, fast_elements() ? HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
+        isolate_, array,
+        fast_elements() ? HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
     {
       DisallowGarbageCollection no_gc;
       Tagged<JSArray> raw = *array;
@@ -1109,7 +1110,7 @@ void CollectElementIndices(Isolate* isolate, DirectHandle<JSObject> object,
       Tagged<JSObject> raw_object = *object;
       ElementsAccessor* accessor = object->GetElementsAccessor();
       for (uint32_t i = 0; i < range; i++) {
-        if (accessor->HasElement(raw_object, i, elements)) {
+        if (accessor->HasElement(isolate, raw_object, i, elements)) {
          indices->push_back(i);
        }
      }
@@ -1129,7 +1130,7 @@ void CollectElementIndices(Isolate* isolate, DirectHandle<JSObject> object,
      }
      ElementsAccessor* accessor = object->GetElementsAccessor();
      for (; i < range; i++) {
-        if (accessor->HasElement(*object, i)) {
+        if (accessor->HasElement(isolate, *object, i)) {
          indices->push_back(i);
        }
      }
@@ -1147,7 +1148,7 @@ void CollectElementIndices(Isolate* isolate, DirectHandle<JSObject> object,
      for (uint32_t i = 0; i < length; i++) {
        // JSSharedArrays are created non-resizable and do not have holes.
        SLOW_DCHECK(object->GetElementsAccessor()->HasElement(
-            *object, i, object->elements()));
+            isolate, *object, i, object->elements()));
        indices->push_back(i);
      }
      if (length == range) return;
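Nearly every hunk in this file makes the same mechanical change: the Isolate* is threaded through ElementsAccessor and JSArray entry points (Fill, Push, Pop, Shift, Unshift, SetLength, HasElement, TransitionElementsKind, UpdateAllocationSite) as an explicit first argument rather than being re-derived inside the callee. A toy sketch of the pattern, with stand-in types far simpler than V8's real ones:

// Toy sketch of the "thread the isolate explicitly" pattern; Isolate and
// ElementsAccessorModel are stand-ins, not V8's types.
#include <cstdio>

struct Isolate {
  const char* name;
};

struct ElementsAccessorModel {
  // Before: Fill(array, ...) re-derived the isolate inside the callee.
  // After:  Fill(isolate, array, ...) takes it as an explicit first argument,
  // making the dependency visible (and cheap) at every call site.
  void Fill(Isolate* isolate, int value) {
    std::printf("fill(%d) on isolate %s\n", value, isolate->name);
  }
};

int main() {
  Isolate isolate{"main"};
  ElementsAccessorModel accessor;
  accessor.Fill(&isolate, 42);
  return 0;
}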
deps/v8/src/builtins/builtins-arraybuffer.cc (6 changes, vendored)

@@ -176,6 +176,12 @@ BUILTIN(ArrayBufferConstructor) {
     }
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_max_length,
                                        Object::ToInteger(isolate, max_length));
+    if (Object::NumberValue(*number_length) >
+        Object::NumberValue(*number_max_length)) {
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate,
+          NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
+    }
   }
   return ConstructBuffer(isolate, target, new_target, number_length,
                          number_max_length, InitializedFlag::kZeroInitialized);
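The added hunk rejects a resizable ArrayBuffer whose requested byteLength exceeds its maxByteLength with a RangeError before any allocation happens. A stand-alone model of just that check; V8 raises a JavaScript RangeError (kInvalidArrayBufferMaxLength), modeled here as std::range_error.

// Stand-alone model of the added length check; names are illustrative.
#include <cstdio>
#include <stdexcept>

void ValidateResizableLengths(double length, double max_length) {
  if (length > max_length) {
    throw std::range_error("invalid ArrayBuffer maxByteLength");
  }
}

int main() {
  try {
    // Models: new ArrayBuffer(16, {maxByteLength: 8})
    ValidateResizableLengths(16, 8);
  } catch (const std::range_error& e) {
    std::printf("RangeError: %s\n", e.what());
  }
  return 0;
}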
@@ -25,11 +25,12 @@ BUILTIN(AsyncDisposableStackOnFulfilled) {
   HandleScope scope(isolate);
 
   DirectHandle<JSDisposableStackBase> stack(
-      Cast<JSDisposableStackBase>(isolate->context()->get(static_cast<int>(
+      Cast<
+          JSDisposableStackBase>(isolate->context()->GetNoCell(static_cast<int>(
           JSDisposableStackBase::AsyncDisposableStackContextSlots::kStack))),
       isolate);
   DirectHandle<JSPromise> promise(
-      Cast<JSPromise>(isolate->context()->get(static_cast<int>(
+      Cast<JSPromise>(isolate->context()->GetNoCell(static_cast<int>(
           JSDisposableStackBase::AsyncDisposableStackContextSlots::
               kOuterPromise))),
       isolate);
@@ -44,11 +45,12 @@ BUILTIN(AsyncDisposableStackOnRejected) {
   HandleScope scope(isolate);
 
   DirectHandle<JSDisposableStackBase> stack(
-      Cast<JSDisposableStackBase>(isolate->context()->get(static_cast<int>(
+      Cast<
+          JSDisposableStackBase>(isolate->context()->GetNoCell(static_cast<int>(
           JSDisposableStackBase::AsyncDisposableStackContextSlots::kStack))),
       isolate);
   DirectHandle<JSPromise> promise(
-      Cast<JSPromise>(isolate->context()->get(static_cast<int>(
+      Cast<JSPromise>(isolate->context()->GetNoCell(static_cast<int>(
           JSDisposableStackBase::AsyncDisposableStackContextSlots::
               kOuterPromise))),
       isolate);
@@ -82,7 +84,7 @@ BUILTIN(AsyncDisposeFromSyncDispose) {
 
   // c. Let result be Completion(Call(method, O)).
   DirectHandle<JSFunction> sync_method(
-      Cast<JSFunction>(isolate->context()->get(static_cast<int>(
+      Cast<JSFunction>(isolate->context()->GetNoCell(static_cast<int>(
           JSDisposableStackBase::AsyncDisposeFromSyncDisposeContextSlots::
               kMethod))),
       isolate);
@@ -375,7 +377,7 @@ BUILTIN(AsyncDisposableStackPrototypeMove) {
   // 5. Set newAsyncDisposableStack.[[AsyncDisposableState]] to pending.
 
   Tagged<JSFunction> constructor_function =
-      Cast<JSFunction>(isolate->native_context()->get(
+      Cast<JSFunction>(isolate->native_context()->GetNoCell(
           Context::JS_ASYNC_DISPOSABLE_STACK_FUNCTION_INDEX));
   DirectHandle<Map> map(constructor_function->initial_map(), isolate);
 
@@ -36,7 +36,7 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
       resume_mode == JSGeneratorObject::kThrow);
 
   TNode<JSAsyncFunctionObject> async_function_object =
-      CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
+      CAST(LoadContextElementNoCell(context, Context::EXTENSION_INDEX));
 
   // Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with
   // unnecessary runtime checks removed.
@@ -93,7 +93,7 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
 
   // Allocate and initialize the async function object.
   TNode<NativeContext> native_context = LoadNativeContext(context);
-  TNode<Map> async_function_object_map = CAST(LoadContextElement(
+  TNode<Map> async_function_object_map = CAST(LoadContextElementNoCell(
       native_context, Context::ASYNC_FUNCTION_OBJECT_MAP_INDEX));
   TNode<JSAsyncFunctionObject> async_function_object =
       UncheckedCast<JSAsyncFunctionObject>(
deps/v8/src/builtins/builtins-async-gen.cc (16 changes, vendored)

@@ -63,8 +63,8 @@ TNode<Object> AsyncBuiltinsAssembler::Await(
     // is the (initial) Promise.prototype and the @@species protector is
     // intact, as that guards the lookup path for "constructor" on
     // JSPromise instances which have the (initial) Promise.prototype.
-    const TNode<Object> promise_prototype =
-        LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+    const TNode<Object> promise_prototype = LoadContextElementNoCell(
+        native_context, Context::PROMISE_PROTOTYPE_INDEX);
     GotoIfNot(TaggedEqual(LoadMapPrototype(value_map), promise_prototype),
               &if_slow_constructor);
     Branch(IsPromiseSpeciesProtectorCellInvalid(), &if_slow_constructor,
@@ -78,8 +78,8 @@ TNode<Object> AsyncBuiltinsAssembler::Await(
   {
     const TNode<Object> value_constructor = GetProperty(
         context, value, isolate()->factory()->constructor_string());
-    const TNode<Object> promise_function =
-        LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+    const TNode<Object> promise_function = LoadContextElementNoCell(
+        native_context, Context::PROMISE_FUNCTION_INDEX);
     Branch(TaggedEqual(value_constructor, promise_function), &if_done,
            &if_slow_path);
   }
@@ -105,14 +105,14 @@ TNode<Object> AsyncBuiltinsAssembler::Await(
       UncheckedCast<Context>(AllocateInNewSpace(kClosureContextSize));
   {
     // Initialize the await context, storing the {generator} as extension.
-    TNode<Map> map = CAST(
-        LoadContextElement(native_context, Context::AWAIT_CONTEXT_MAP_INDEX));
+    TNode<Map> map = CAST(LoadContextElementNoCell(
+        native_context, Context::AWAIT_CONTEXT_MAP_INDEX));
     StoreMapNoWriteBarrier(closure_context, map);
     StoreObjectFieldNoWriteBarrier(
         closure_context, Context::kLengthOffset,
         SmiConstant(Context::MIN_CONTEXT_EXTENDED_SLOTS));
     const TNode<Object> empty_scope_info =
-        LoadContextElement(native_context, Context::SCOPE_INFO_INDEX);
+        LoadContextElementNoCell(native_context, Context::SCOPE_INFO_INDEX);
     StoreContextElementNoWriteBarrier(
         closure_context, Context::SCOPE_INFO_INDEX, empty_scope_info);
     StoreContextElementNoWriteBarrier(closure_context, Context::PREVIOUS_INDEX,
@@ -183,7 +183,7 @@ TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) {
   auto context = Parameter<Context>(Descriptor::kContext);
 
   const TNode<Object> done =
-      LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
+      LoadContextElementNoCell(context, ValueUnwrapContext::kDoneSlot);
   CSA_DCHECK(this, IsBoolean(CAST(done)));
 
   const TNode<Object> unwrapped_value =
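The get -> GetNoCell and LoadContextElement -> LoadContextElementNoCell renames across these files appear to accompany V8's new context cells (note the AllocateContextCell macro added in base.tq above): a context slot may now hold a cell indirection, and the *NoCell accessors are for slots that are known to hold plain values. A toy model of that contract; Cell, Slot, and GetNoCell here are illustrative, not V8's types.

// Toy model of a context slot that may hold either a plain value or a cell
// indirection; Cell, Slot, and GetNoCell are illustrative, not V8's types.
#include <cassert>
#include <cstdio>
#include <variant>

struct Cell {
  int value;  // a mutable box interposed between the slot and the value
};
using Slot = std::variant<int, Cell>;

// Mirrors the *NoCell accessors: callers that know a given slot can never
// hold a cell assert that invariant instead of silently unwrapping one.
int GetNoCell(const Slot& slot) {
  assert(std::holds_alternative<int>(slot) && "slot unexpectedly holds a cell");
  return std::get<int>(slot);
}

int main() {
  Slot slot = 7;
  std::printf("%d\n", GetNoCell(slot));
  return 0;
}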
Some files were not shown because too many files have changed in this diff.