ZJIT: Enable or remove comments from YJIT (#14214)

Takashi Kokubun, 2025-08-13 17:32:29 -07:00, committed by GitHub
parent c9346a166c
commit cb281653ad
7 changed files with 12 additions and 466 deletions

==== changed file 1 of 7 ====

@@ -47,7 +47,6 @@ impl From<BranchCond> for [u8; 4] {
     }
 }
 
-/*
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -77,4 +76,3 @@ mod tests {
         assert_eq!(0x54800000, result);
     }
 }
-*/

==== changed file 2 of 7 ====

@@ -60,7 +60,6 @@ impl From<Conditional> for [u8; 4] {
     }
 }
 
-/*
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -72,4 +71,3 @@ mod tests {
         assert_eq!(0x9a821020, result);
     }
 }
-*/

==== changed file 3 of 7 ====

@@ -345,7 +345,6 @@ pub fn uimm_num_bits(uimm: u64) -> u8
     return 64;
 }
 
-/*
 #[cfg(test)]
 mod tests
 {
@@ -381,32 +380,5 @@ mod tests
         assert_eq!(uimm_num_bits((u32::MAX as u64) + 1), 64);
         assert_eq!(uimm_num_bits(u64::MAX), 64);
     }
-
-    #[test]
-    fn test_code_size() {
-        // Write 4 bytes in the first page
-        let mut cb = CodeBlock::new_dummy(CodeBlock::PREFERRED_CODE_PAGE_SIZE * 2);
-        cb.write_bytes(&[0, 0, 0, 0]);
-        assert_eq!(cb.code_size(), 4);
-
-        // Moving to the next page should not increase code_size
-        cb.next_page(cb.get_write_ptr(), |_, _| {});
-        assert_eq!(cb.code_size(), 4);
-
-        // Write 4 bytes in the second page
-        cb.write_bytes(&[0, 0, 0, 0]);
-        assert_eq!(cb.code_size(), 8);
-
-        // Rewrite 4 bytes in the first page
-        let old_write_pos = cb.get_write_pos();
-        cb.set_pos(0);
-        cb.write_bytes(&[1, 1, 1, 1]);
-
-        // Moving from an old page to the next page should not increase code_size
-        cb.next_page(cb.get_write_ptr(), |_, _| {});
-        cb.set_pos(old_write_pos);
-        assert_eq!(cb.code_size(), 8);
-    }
 }
-*/

==== changed file 4 of 7 ====

@@ -317,34 +317,6 @@ pub fn mem_opnd_sib(num_bits: u8, base_opnd: X86Opnd, index_opnd: X86Opnd, scale
     }
 }
 
-/*
-// Struct member operand
-#define member_opnd(base_reg, struct_type, member_name) mem_opnd( \
-    8 * sizeof(((struct_type*)0)->member_name), \
-    base_reg, \
-    offsetof(struct_type, member_name) \
-)
-
-// Struct member operand with an array index
-#define member_opnd_idx(base_reg, struct_type, member_name, idx) mem_opnd( \
-    8 * sizeof(((struct_type*)0)->member_name[0]), \
-    base_reg, \
-    (offsetof(struct_type, member_name) + \
-        sizeof(((struct_type*)0)->member_name[0]) * idx) \
-)
-*/
-
-/*
-// TODO: this should be a method, X86Opnd.resize() or X86Opnd.subreg()
-static x86opnd_t resize_opnd(x86opnd_t opnd, uint32_t num_bits)
-{
-    assert (num_bits % 8 == 0);
-    x86opnd_t sub = opnd;
-    sub.num_bits = num_bits;
-    return sub;
-}
-*/
-
 pub fn imm_opnd(value: i64) -> X86Opnd
 {
     X86Opnd::Imm(X86Imm { num_bits: imm_num_bits(value), value })
@@ -1103,46 +1075,6 @@ pub fn movsx(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) {
     }
 }
 
-/*
-/// movzx - Move with zero extension (unsigned values)
-void movzx(codeblock_t *cb, x86opnd_t dst, x86opnd_t src)
-{
-    cb.writeASM("movzx", dst, src);
-
-    uint32_t dstSize;
-    if (dst.isReg)
-        dstSize = dst.reg.size;
-    else
-        assert (false, "movzx dst must be a register");
-
-    uint32_t srcSize;
-    if (src.isReg)
-        srcSize = src.reg.size;
-    else if (src.isMem)
-        srcSize = src.mem.size;
-    else
-        assert (false);
-
-    assert (
-        srcSize < dstSize,
-        "movzx: srcSize >= dstSize"
-    );
-
-    if (srcSize is 8)
-    {
-        cb.writeRMInstr!('r', 0xFF, 0x0F, 0xB6)(dstSize is 16, dstSize is 64, dst, src);
-    }
-    else if (srcSize is 16)
-    {
-        cb.writeRMInstr!('r', 0xFF, 0x0F, 0xB7)(dstSize is 16, dstSize is 64, dst, src);
-    }
-    else
-    {
-        assert (false, "invalid src operand size for movxz");
-    }
-}
-*/
-
 /// nop - Noop, one or multiple bytes long
 pub fn nop(cb: &mut CodeBlock, length: u32) {
     match length {

==== changed file 5 of 7 ====

@@ -317,7 +317,7 @@ impl Assembler
                     asm.load(opnd)
                 }
             },
-            Opnd::None | Opnd::Value(_) /*| Opnd::Stack { .. }*/ => unreachable!()
+            Opnd::None | Opnd::Value(_) => unreachable!()
         }
     }
@@ -1742,13 +1742,12 @@ mod tests {
         asm.compile_with_num_regs(&mut cb, 0);
     }
 
-    /*
     #[test]
     fn test_emit_lea_label() {
         let (mut asm, mut cb) = setup_asm();
 
         let label = asm.new_label("label");
-        let opnd = asm.lea_jump_target(label);
+        let opnd = asm.lea_jump_target(label.clone());
 
         asm.write_label(label);
         asm.bake_string("Hello, world!");
@@ -1756,7 +1755,6 @@ mod tests {
         asm.compile_with_num_regs(&mut cb, 1);
     }
-    */
 
     #[test]
     fn test_emit_load_mem_disp_fits_into_load() {
@@ -1967,48 +1965,6 @@ mod tests {
        asm.compile_with_num_regs(&mut cb, 2);
     }
 
-    /*
-    #[test]
-    fn test_bcond_straddling_code_pages() {
-        const LANDING_PAGE: usize = 65;
-        let mut asm = Assembler::new(0);
-        let mut cb = CodeBlock::new_dummy_with_freed_pages(vec![0, LANDING_PAGE]);
-
-        // Skip to near the end of the page. Room for two instructions.
-        cb.set_pos(cb.page_start_pos() + cb.page_end() - 8);
-
-        let end = asm.new_label("end");
-        // Start with a conditional jump...
-        asm.jz(end);
-
-        // A few instructions, enough to cause a page switch.
-        let sum = asm.add(399.into(), 111.into());
-        let xorred = asm.xor(sum, 859.into());
-        asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), xorred);
-        asm.store(Opnd::mem(64, Opnd::Reg(X0_REG), 0), xorred);
-
-        // The branch target. It should be in the landing page.
-        asm.write_label(end);
-        asm.cret(xorred);
-
-        // [Bug #19385]
-        // This used to panic with "The offset must be 19 bits or less."
-        // due to attempting to lower the `asm.jz` above to a `b.e` with an offset that's > 1 MiB.
-        let starting_pos = cb.get_write_pos();
-        asm.compile_with_num_regs(&mut cb, 2);
-        let gap = cb.get_write_pos() - starting_pos;
-        assert!(gap > 0b1111111111111111111);
-
-        let instruction_at_starting_pos: [u8; 4] = unsafe {
-            std::slice::from_raw_parts(cb.get_ptr(starting_pos).raw_ptr(&cb), 4)
-        }.try_into().unwrap();
-        assert_eq!(
-            0b000101 << 26_u32,
-            u32::from_le_bytes(instruction_at_starting_pos) & (0b111111 << 26_u32),
-            "starting instruction should be an unconditional branch to the new page (B)"
-        );
-    }
-    */
-
     #[test]
     fn test_emit_xor() {
         let (mut asm, mut cb) = setup_asm();
@@ -2018,9 +1974,9 @@ mod tests {
         asm.compile_with_num_regs(&mut cb, 1);
 
-        assert_disasm!(cb, "0b0001ca4b0000f8", "
-            0x0: eor x11, x0, x1
-            0x4: stur x11, [x2]
+        assert_disasm!(cb, "000001ca400000f8", "
+            0x0: eor x0, x0, x1
+            0x4: stur x0, [x2]
         ");
     }
@@ -2082,10 +2038,10 @@ mod tests {
         asm.mov(Opnd::Reg(TEMP_REGS[0]), out);
         asm.compile_with_num_regs(&mut cb, 2);
 
-        assert_disasm!(cb, "8b0280d20c0080d261b18c9a", {"
-            0x0: mov x11, #0x14
-            0x4: mov x12, #0
-            0x8: csel x1, x11, x12, lt
+        assert_disasm!(cb, "800280d2010080d201b0819a", {"
+            0x0: mov x0, #0x14
+            0x4: mov x1, #0
+            0x8: csel x1, x0, x1, lt
         "});
     }
@@ -2098,11 +2054,9 @@ mod tests {
         asm.mov(Opnd::Reg(TEMP_REGS[0]), out);
         asm.compile_with_num_regs(&mut cb, 2);
 
-        assert_disasm!(cb, "2b0500b16b0500b1e1030baa", {"
-            0x0: adds x11, x9, #1
-            0x4: adds x11, x11, #1
-            0x8: mov x1, x11
+        assert_disasm!(cb, "200500b1010400b1", {"
+            0x0: adds x0, x9, #1
+            0x4: adds x1, x0, #1
         "});
     }
-    */
 }

==== changed file 6 of 7 ====

@@ -218,31 +218,6 @@ impl Opnd
     pub fn match_num_bits(opnds: &[Opnd]) -> u8 {
         Self::match_num_bits_iter(opnds.iter())
     }
 
-    /*
-    /// Convert Opnd::Stack into RegMapping
-    pub fn reg_opnd(&self) -> RegOpnd {
-        self.get_reg_opnd().unwrap()
-    }
-
-    /// Convert an operand into RegMapping if it's Opnd::Stack
-    pub fn get_reg_opnd(&self) -> Option<RegOpnd> {
-        match *self {
-            Opnd::Stack { idx, stack_size, num_locals, .. } => Some(
-                if let Some(num_locals) = num_locals {
-                    let last_idx = stack_size as i32 + VM_ENV_DATA_SIZE as i32 - 1;
-                    assert!(last_idx <= idx, "Local index {} must be >= last local index {}", idx, last_idx);
-                    assert!(idx <= last_idx + num_locals as i32, "Local index {} must be < last local index {} + local size {}", idx, last_idx, num_locals);
-                    RegOpnd::Local((last_idx + num_locals as i32 - idx) as u8)
-                } else {
-                    assert!(idx < stack_size as i32);
-                    RegOpnd::Stack((stack_size as i32 - idx - 1) as u8)
-                }
-            ),
-            _ => None,
-        }
-    }
-    */
 }
 
 impl From<usize> for Opnd {
@@ -1213,30 +1188,6 @@ pub struct Assembler {
     /// Names of labels
     pub(super) label_names: Vec<String>,
 
-    /*
-    /// Context for generating the current insn
-    pub ctx: Context,
-
-    /// The current ISEQ's local table size. asm.local_opnd() uses this, and it's
-    /// sometimes hard to pass this value, e.g. asm.spill_regs() in asm.ccall().
-    ///
-    /// `None` means we're not assembling for an ISEQ, or that the local size is
-    /// not relevant.
-    pub(super) num_locals: Option<u32>,
-
-    /// Side exit caches for each SideExitContext
-    pub(super) side_exits: HashMap<SideExitContext, CodePtr>,
-
-    /// PC for Target::SideExit
-    side_exit_pc: Option<*mut VALUE>,
-
-    /// Stack size for Target::SideExit
-    side_exit_stack_size: Option<u8>,
-
-    /// If true, the next ccall() should verify its leafness
-    leaf_ccall: bool,
-    */
 }
 
 impl Assembler
@@ -1246,20 +1197,6 @@ impl Assembler
         Self::new_with_label_names(Vec::default(), 0)
     }
 
-    /*
-    /// Create an Assembler for ISEQ-specific code.
-    /// It includes all inline code and some outlined code like side exits and stubs.
-    pub fn new(num_locals: u32) -> Self {
-        Self::new_with_label_names(Vec::default(), HashMap::default(), Some(num_locals))
-    }
-
-    /// Create an Assembler for outlined code that are not specific to any ISEQ,
-    /// e.g. trampolines that are shared globally.
-    pub fn new_without_iseq() -> Self {
-        Self::new_with_label_names(Vec::default(), HashMap::default(), None)
-    }
-    */
 
     /// Create an Assembler with parameters that are populated by another Assembler instance.
     /// This API is used for copying an Assembler for the next compiler pass.
     pub fn new_with_label_names(label_names: Vec<String>, num_vregs: usize) -> Self {
@@ -1273,25 +1210,6 @@ impl Assembler
         }
     }
 
-    /*
-    /// Get the list of registers that can be used for stack temps.
-    pub fn get_temp_regs2() -> &'static [Reg] {
-        let num_regs = get_option!(num_temp_regs);
-        &TEMP_REGS[0..num_regs]
-    }
-
-    /// Get the number of locals for the ISEQ being compiled
-    pub fn get_num_locals(&self) -> Option<u32> {
-        self.num_locals
-    }
-
-    /// Set a context for generating side exits
-    pub fn set_side_exit_context(&mut self, pc: *mut VALUE, stack_size: u8) {
-        self.side_exit_pc = Some(pc);
-        self.side_exit_stack_size = Some(stack_size);
-    }
-    */
 
     /// Build an Opnd::VReg and initialize its LiveRange
     pub(super) fn new_vreg(&mut self, num_bits: u8) -> Opnd {
         let vreg = Opnd::VReg { idx: self.live_ranges.len(), num_bits };
@@ -1330,24 +1248,6 @@ impl Assembler
         self.insns.push(insn);
     }
 
-    /*
-    /// Get a cached side exit, wrapping a counter if specified
-    pub fn get_side_exit(&mut self, side_exit_context: &SideExitContext, counter: Option<Counter>, ocb: &mut OutlinedCb) -> Option<CodePtr> {
-        // Get a cached side exit
-        let side_exit = match self.side_exits.get(&side_exit_context) {
-            None => {
-                let exit_code = gen_outlined_exit(side_exit_context.pc, self.num_locals.unwrap(), &side_exit_context.get_ctx(), ocb)?;
-                self.side_exits.insert(*side_exit_context, exit_code);
-                exit_code
-            }
-            Some(code_ptr) => *code_ptr,
-        };
-
-        // Wrap a counter if needed
-        gen_counted_exit(side_exit_context.pc, side_exit, ocb, counter)
-    }
-    */
 
     /// Create a new label instance that we can jump to
     pub fn new_label(&mut self, name: &str) -> Target
     {
@@ -1358,164 +1258,6 @@ impl Assembler
         Target::Label(label)
     }
 
-    /*
-    /// Convert Opnd::Stack to Opnd::Mem or Opnd::Reg
-    pub fn lower_stack_opnd(&self, opnd: &Opnd) -> Opnd {
-        // Convert Opnd::Stack to Opnd::Mem
-        fn mem_opnd(opnd: &Opnd) -> Opnd {
-            if let Opnd::Stack { idx, sp_offset, num_bits, .. } = *opnd {
-                incr_counter!(temp_mem_opnd);
-                Opnd::mem(num_bits, SP, (sp_offset as i32 - idx - 1) * SIZEOF_VALUE_I32)
-            } else {
-                unreachable!()
-            }
-        }
-
-        // Convert Opnd::Stack to Opnd::Reg
-        fn reg_opnd(opnd: &Opnd, reg_idx: usize) -> Opnd {
-            let regs = Assembler::get_temp_regs2();
-            if let Opnd::Stack { num_bits, .. } = *opnd {
-                incr_counter!(temp_reg_opnd);
-                Opnd::Reg(regs[reg_idx]).with_num_bits(num_bits).unwrap()
-            } else {
-                unreachable!()
-            }
-        }
-
-        match opnd {
-            Opnd::Stack { reg_mapping, .. } => {
-                if let Some(reg_idx) = reg_mapping.unwrap().get_reg(opnd.reg_opnd()) {
-                    reg_opnd(opnd, reg_idx)
-                } else {
-                    mem_opnd(opnd)
-                }
-            }
-            _ => unreachable!(),
-        }
-    }
-
-    /// Allocate a register to a stack temp if available.
-    pub fn alloc_reg(&mut self, mapping: RegOpnd) {
-        // Allocate a register if there's no conflict.
-        let mut reg_mapping = self.ctx.get_reg_mapping();
-        if reg_mapping.alloc_reg(mapping) {
-            self.set_reg_mapping(reg_mapping);
-        }
-    }
-
-    /// Erase local variable type information
-    /// eg: because of a call we can't track
-    pub fn clear_local_types(&mut self) {
-        asm_comment!(self, "clear local variable types");
-        self.ctx.clear_local_types();
-    }
-
-    /// Repurpose stack temp registers to the corresponding locals for arguments
-    pub fn map_temp_regs_to_args(&mut self, callee_ctx: &mut Context, argc: i32) -> Vec<RegOpnd> {
-        let mut callee_reg_mapping = callee_ctx.get_reg_mapping();
-        let mut mapped_temps = vec![];
-
-        for arg_idx in 0..argc {
-            let stack_idx: u8 = (self.ctx.get_stack_size() as i32 - argc + arg_idx).try_into().unwrap();
-            let temp_opnd = RegOpnd::Stack(stack_idx);
-
-            // For each argument, if the stack temp for it has a register,
-            // let the callee use the register for the local variable.
-            if let Some(reg_idx) = self.ctx.get_reg_mapping().get_reg(temp_opnd) {
-                let local_opnd = RegOpnd::Local(arg_idx.try_into().unwrap());
-                callee_reg_mapping.set_reg(local_opnd, reg_idx);
-                mapped_temps.push(temp_opnd);
-            }
-        }
-
-        asm_comment!(self, "local maps: {:?}", callee_reg_mapping);
-        callee_ctx.set_reg_mapping(callee_reg_mapping);
-        mapped_temps
-    }
-
-    /// Spill all live registers to the stack
-    pub fn spill_regs(&mut self) {
-        self.spill_regs_except(&vec![]);
-    }
-
-    /// Spill all live registers except `ignored_temps` to the stack
-    pub fn spill_regs_except(&mut self, ignored_temps: &Vec<RegOpnd>) {
-        // Forget registers above the stack top
-        let mut reg_mapping = self.ctx.get_reg_mapping();
-        for stack_idx in self.ctx.get_stack_size()..MAX_CTX_TEMPS as u8 {
-            reg_mapping.dealloc_reg(RegOpnd::Stack(stack_idx));
-        }
-        self.set_reg_mapping(reg_mapping);
-
-        // If no registers are in use, skip all checks
-        if self.ctx.get_reg_mapping() == RegMapping::default() {
-            return;
-        }
-
-        // Collect stack temps to be spilled
-        let mut spilled_opnds = vec![];
-        for stack_idx in 0..u8::min(MAX_CTX_TEMPS as u8, self.ctx.get_stack_size()) {
-            let reg_opnd = RegOpnd::Stack(stack_idx);
-            if !ignored_temps.contains(&reg_opnd) && reg_mapping.dealloc_reg(reg_opnd) {
-                let idx = self.ctx.get_stack_size() - 1 - stack_idx;
-                let spilled_opnd = self.stack_opnd(idx.into());
-                spilled_opnds.push(spilled_opnd);
-                reg_mapping.dealloc_reg(spilled_opnd.reg_opnd());
-            }
-        }
-
-        // Collect locals to be spilled
-        for local_idx in 0..MAX_CTX_TEMPS as u8 {
-            if reg_mapping.dealloc_reg(RegOpnd::Local(local_idx)) {
-                let first_local_ep_offset = self.num_locals.unwrap() + VM_ENV_DATA_SIZE - 1;
-                let ep_offset = first_local_ep_offset - local_idx as u32;
-                let spilled_opnd = self.local_opnd(ep_offset);
-                spilled_opnds.push(spilled_opnd);
-                reg_mapping.dealloc_reg(spilled_opnd.reg_opnd());
-            }
-        }
-
-        // Spill stack temps and locals
-        if !spilled_opnds.is_empty() {
-            asm_comment!(self, "spill_regs: {:?} -> {:?}", self.ctx.get_reg_mapping(), reg_mapping);
-            for &spilled_opnd in spilled_opnds.iter() {
-                self.spill_reg(spilled_opnd);
-            }
-            self.ctx.set_reg_mapping(reg_mapping);
-        }
-    }
-
-    /// Spill a stack temp from a register to the stack
-    pub fn spill_reg(&mut self, opnd: Opnd) {
-        assert_ne!(self.ctx.get_reg_mapping().get_reg(opnd.reg_opnd()), None);
-
-        // Use different RegMappings for dest and src operands
-        let reg_mapping = self.ctx.get_reg_mapping();
-        let mut mem_mappings = reg_mapping;
-        mem_mappings.dealloc_reg(opnd.reg_opnd());
-
-        // Move the stack operand from a register to memory
-        match opnd {
-            Opnd::Stack { idx, num_bits, stack_size, num_locals, sp_offset, .. } => {
-                self.mov(
-                    Opnd::Stack { idx, num_bits, stack_size, num_locals, sp_offset, reg_mapping: Some(mem_mappings) },
-                    Opnd::Stack { idx, num_bits, stack_size, num_locals, sp_offset, reg_mapping: Some(reg_mapping) },
-                );
-            }
-            _ => unreachable!(),
-        }
-        incr_counter!(temp_spill);
-    }
-
-    /// Update which stack temps are in a register
-    pub fn set_reg_mapping(&mut self, reg_mapping: RegMapping) {
-        if self.ctx.get_reg_mapping() != reg_mapping {
-            asm_comment!(self, "reg_mapping: {:?} -> {:?}", self.ctx.get_reg_mapping(), reg_mapping);
-            self.ctx.set_reg_mapping(reg_mapping);
-        }
-    }
-    */
 
     // Shuffle register moves, sometimes adding extra moves using SCRATCH_REG,
     // so that they will not rewrite each other before they are used.
     pub fn resolve_parallel_moves(old_moves: &Vec<(Reg, Opnd)>) -> Vec<(Reg, Opnd)> {
@@ -1937,31 +1679,6 @@ impl Assembler {
         out
     }
 
-    /*
-    /// Let vm_check_canary() assert the leafness of this ccall if leaf_ccall is set
-    fn set_stack_canary(&mut self, opnds: &Vec<Opnd>) -> Option<Opnd> {
-        // Use the slot right above the stack top for verifying leafness.
-        let canary_opnd = self.stack_opnd(-1);
-
-        // If the slot is already used, which is a valid optimization to avoid spills,
-        // give up the verification.
-        let canary_opnd = if cfg!(feature = "runtime_checks") && self.leaf_ccall && opnds.iter().all(|opnd|
-            opnd.get_reg_opnd() != canary_opnd.get_reg_opnd()
-        ) {
-            asm_comment!(self, "set stack canary");
-            self.mov(canary_opnd, vm_stack_canary().into());
-            Some(canary_opnd)
-        } else {
-            None
-        };
-
-        // Avoid carrying the flag to the next instruction whether we verified it or not.
-        self.leaf_ccall = false;
-
-        canary_opnd
-    }
-    */
 
     pub fn cmp(&mut self, left: Opnd, right: Opnd) {
         self.push_insn(Insn::Cmp { left, right });
     }
@@ -1975,10 +1692,6 @@ impl Assembler {
     pub fn cpop_all(&mut self) {
         self.push_insn(Insn::CPopAll);
-
-        // Re-enable ccall's RegMappings assertion disabled by cpush_all.
-        // cpush_all + cpop_all preserve all stack temp registers, so it's safe.
-        //self.set_reg_mapping(self.ctx.get_reg_mapping());
     }
 
     pub fn cpop_into(&mut self, opnd: Opnd) {
@@ -1991,12 +1704,6 @@ impl Assembler {
     pub fn cpush_all(&mut self) {
         self.push_insn(Insn::CPushAll);
-
-        // Mark all temps as not being in registers.
-        // Temps will be marked back as being in registers by cpop_all.
-        // We assume that cpush_all + cpop_all are used for C functions in utils.rs
-        // that don't require spill_regs for GC.
-        //self.set_reg_mapping(RegMapping::default());
     }
 
     pub fn cret(&mut self, opnd: Opnd) {
@@ -2257,18 +1964,6 @@ impl Assembler {
         out
     }
 
-    /*
-    /// Verify the leafness of the given block
-    pub fn with_leaf_ccall<F, R>(&mut self, mut block: F) -> R
-    where F: FnMut(&mut Self) -> R {
-        let old_leaf_ccall = self.leaf_ccall;
-        self.leaf_ccall = true;
-        let ret = block(self);
-        self.leaf_ccall = old_leaf_ccall;
-        ret
-    }
-    */
 
     /// Add a label at the current position
     pub fn write_label(&mut self, target: Target) {
         assert!(target.unwrap_label().0 < self.label_names.len());

==== changed file 7 of 7 ====

@@ -1,4 +1,3 @@
-/*
 #![cfg(test)]
 use crate::asm::CodeBlock;
 use crate::backend::*;
@@ -328,5 +327,3 @@ fn test_no_pos_marker_callback_when_compile_fails() {
     let cb = &mut CodeBlock::new_dummy(8);
     assert!(asm.compile(cb, None).is_none(), "should fail due to tiny size limit");
 }
-*/