ruby-changes:73158
From: Maxime <ko1@a...>
Date: Tue, 30 Aug 2022 00:57:23 +0900 (JST)
Subject: [ruby-changes:73158] 2eba6aef72 (master): Port over get_branch_target()
https://git.ruby-lang.org/ruby.git/commit/?id=2eba6aef72

From 2eba6aef724f20162bd650d535be876aa4a19964 Mon Sep 17 00:00:00 2001
From: Maxime Chevalier-Boisvert <maxime.chevalierboisvert@s...>
Date: Tue, 21 Jun 2022 11:05:20 -0400
Subject: Port over get_branch_target()

---
 yjit/src/backend/arm64/mod.rs | 16 +++++++++++++++-
 yjit/src/backend/tests.rs     |  7 +++++--
 yjit/src/core.rs              | 24 ++++++++++++------------
 3 files changed, 32 insertions(+), 15 deletions(-)

diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index be67e2384d..4e4c553c9d 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -34,7 +34,7 @@ impl From<Opnd> for A64Opnd { https://github.com/ruby/ruby/blob/trunk/yjit/src/backend/arm64/mod.rs#L34
 impl Assembler
 {
     /// Get the list of registers from which we can allocate on this platform
-    pub fn get_scratch_regs() -> Vec<Reg>
+    pub fn get_alloc_regs() -> Vec<Reg>
     {
         vec![
             X12_REG,
@@ -45,6 +45,11 @@ impl Assembler https://github.com/ruby/ruby/blob/trunk/yjit/src/backend/arm64/mod.rs#L45
 
     /// Split platform-specific instructions
     fn arm64_split(mut self) -> Assembler
     {
+        // The transformations done here are meant to make our lives simpler in later
+        // stages of the compilation pipeline.
+        // Here we may want to make sure that all instructions (except load and store)
+        // have no memory operands.
+
         todo!();
     }
 
@@ -52,6 +57,15 @@ impl Assembler https://github.com/ruby/ruby/blob/trunk/yjit/src/backend/arm64/mod.rs#L57
     /// Returns a list of GC offsets
     pub fn arm64_emit(&mut self, cb: &mut CodeBlock) -> Vec<u32>
     {
+        // NOTE: dear Kevin,
+        // for arm, you may want to reserve 1 or 2 caller-save registers
+        // to use as scracth registers (during the last phase of the codegen)
+        // These registers will not be allocated to anything by the register
+        // allocator, they're just useful because arm is slightly trickier
+        // than x86 to generate code for.
+        // For example, if you want to jump far away, you may want to store
+        // the jump target address in a register first.
+
         todo!();
     }
diff --git a/yjit/src/backend/tests.rs b/yjit/src/backend/tests.rs
index a8ae1bc97a..3a0f14e1f4 100644
--- a/yjit/src/backend/tests.rs
+++ b/yjit/src/backend/tests.rs
@@ -192,12 +192,15 @@ fn test_c_call() https://github.com/ruby/ruby/blob/trunk/yjit/src/backend/tests.rs#L192
 
     let (mut asm, mut cb) = setup_asm();
 
-    asm.ccall(
+    let ret_val = asm.ccall(
         dummy_c_fun as *const u8,
         vec![Opnd::mem(64, SP, 0), Opnd::UImm(1)]
     );
 
-    asm.compile_with_num_regs(&mut cb, 2);
+    // Make sure that the call's return value is usable
+    asm.mov(Opnd::mem(64, SP, 0), ret_val);
+
+    asm.compile_with_num_regs(&mut cb, 1);
 }
 
 #[test]
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index 10ef9c5151..a2659b55fd 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -1768,29 +1768,29 @@ fn get_branch_target( https://github.com/ruby/ruby/blob/trunk/yjit/src/core.rs#L1768
     // This means the branch stub owns its own reference to the branch
     let branch_ptr: *const RefCell<Branch> = BranchRef::into_raw(branchref.clone());
 
+    let mut asm = Assembler::new();
 
-
-
-
-    todo!("stub codegen with new assembler");
-
-    /*
     // Call branch_stub_hit(branch_idx, target_idx, ec)
-    mov(ocb, C_ARG_REGS[2], REG_EC);
-    mov(ocb, C_ARG_REGS[1], uimm_opnd(target_idx as u64));
-    mov(ocb, C_ARG_REGS[0], const_ptr_opnd(branch_ptr as *const u8));
-    call_ptr(ocb, REG0, branch_stub_hit as *mut u8);
+    let jump_addr = asm.ccall(
+        branch_stub_hit as *mut u8,
+        vec![
+            EC,
+            Opnd::UImm(target_idx as u64),
+            Opnd::const_ptr(branch_ptr as *const u8)
+        ]
+    );
 
     // Jump to the address returned by the
     // branch_stub_hit call
-    jmp_rm(ocb, RAX);
+    asm.jmp_opnd(jump_addr);
+
+    asm.compile(ocb);
 
     if ocb.has_dropped_bytes() {
         None // No space
     } else {
         Some(stub_addr)
     }
-    */
 }
 
 pub fn gen_branch(
--
cgit v1.2.1

--
ML: ruby-changes@q...
Info: http://www.atdot.net/~ko1/quickml/
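
The NOTE left in arm64_emit() above points at a real AArch64 constraint: a direct branch only encodes a limited displacement (about +/-128 MiB for B), so jumping to an arbitrary 64-bit address means materializing that address in a register first and branching through it. As a rough, standalone illustration of the instruction pattern such a far jump boils down to, here is a small Rust sketch; emit_far_jump and the choice of x16 as the scratch register are hypothetical, and this is not the YJIT assembler API, just the standard MOVZ/MOVK/BR encodings.

// Illustrative only: not part of this commit and not the YJIT arm64 backend.
// Shows why a far jump on AArch64 wants a reserved scratch register.
fn emit_far_jump(target: u64, rd: u32) -> Vec<u32> {
    assert!(rd < 31, "use a general-purpose register, not SP/XZR");
    let mut insns = Vec::new();

    // MOVZ xRd, #imm16, LSL #0: clears the register and sets bits [15:0].
    insns.push(0xD280_0000 | (((target & 0xFFFF) as u32) << 5) | rd);

    // MOVK xRd, #imm16, LSL #(16 * hw): patches in the remaining 16-bit chunks.
    for hw in 1u32..4 {
        let chunk = ((target >> (16 * hw)) & 0xFFFF) as u32;
        insns.push(0xF280_0000 | (hw << 21) | (chunk << 5) | rd);
    }

    // BR xRd: branch to the address now held in the scratch register.
    insns.push(0xD61F_0000 | (rd << 5));

    insns
}

fn main() {
    // x16 (IP0) is a conventional intra-procedure-call scratch register.
    for insn in emit_far_jump(0x0000_7F12_3456_789A, 16) {
        println!("{insn:08x}");
    }
}

Running it prints the five 32-bit instruction words such a far jump occupies, which is also why keeping one or two registers away from the register allocator makes the final codegen phase simpler on arm than on x86.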