Files
cs420/src/asmgen/mod.rs
2025-06-18 04:39:26 +00:00

3570 lines
154 KiB
Rust

use core::{f32, num};
use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
use std::hash::Hash;
use std::ops::Deref;
use itertools::Itertools;
use lang_c::ast;
use crate::ir::HasDtype;
use crate::opt::opt_utils::*;
use crate::{Translate, asm, ir};
/// Assembly generator: translates an IR translation unit into RISC-V assembly.
#[derive(Debug, Default)]
pub struct Asmgen {
    // Completed `.text` sections, one per translated function definition.
    functions: Vec<asm::Section<asm::Function>>,
    // Completed `.data` sections, one per translated global variable.
    variables: Vec<asm::Section<asm::Variable>>,
    // Counter for generating fresh names — presumably for phinode-resolution
    // blocks; not used in this part of the file, TODO confirm against the rest.
    phinode_counter: usize,
}
impl Translate<ir::TranslationUnit> for Asmgen {
    type Target = asm::Asm;
    type Error = ();

    /// Translates every declaration of `source`, then assembles the collected
    /// function and variable sections into a single `asm::Asm` unit.
    ///
    /// The accumulated sections are moved out of `self` via `mem::take`, so
    /// the generator is left empty and reusable afterwards.
    fn translate(&mut self, source: &ir::TranslationUnit) -> Result<Self::Target, Self::Error> {
        for (name, decl) in &source.decls {
            self.translate_decl(name, decl, &source.structs);
        }
        Ok(asm::Asm {
            unit: asm::TranslationUnit {
                functions: std::mem::take(&mut self.functions),
                variables: std::mem::take(&mut self.variables),
            },
        })
    }
}
/// Interference graph driving register allocation.
struct InferenceGraph {
    // Pairs of registers that are simultaneously live and therefore must not
    // share a physical register. Both orientations (a,b) and (b,a) are stored.
    edges: HashSet<(ir::RegisterId, ir::RegisterId)>,
    // Every IR register with its dtype and the physical register assigned to
    // it. `asm::Register::Zero` is the sentinel for "not allocated" (spilled).
    vertices: HashMap<ir::RegisterId, (ir::Dtype, asm::Register)>,
    // Per-function facts (argument registers, call sites, memcpy usage, ...).
    analysis: Analysis,
    // Liveness result: for each program point (encoded as `RegisterId::temp`)
    // the set of registers live there.
    lives: HashMap<ir::RegisterId, HashSet<ir::RegisterId>>,
}
impl InferenceGraph {
/// Builds the interference graph for `code`.
///
/// Steps:
/// 1. Iterative backward liveness analysis to a fixed point (`lives`).
/// 2. Interference edges between every pair of simultaneously-live registers.
/// 3. Pre-coloring: function parameters and call results that never have to
///    survive a call are pinned to their ABI registers.
/// 4. Spill detection: at program points where 12 or more integer registers
///    are live at once, registers that cannot fit are marked spilled.
/// 5. Greedy coloring of the remaining vertices, highest degree first.
fn new(
    code: &ir::FunctionDefinition,
    signature: &ir::FunctionSignature,
    structs: &HashMap<String, Option<ir::Dtype>>,
) -> Self {
    let cfg = make_cfg(code);
    let reverse_cfg = reverse_cfg(&cfg);
    let domtree = Domtree::new(code.bid_init, &cfg, &reverse_cfg);
    let mut lives = HashMap::new();
    let mut first_loop = true;
    let mut vertices = HashMap::new();
    loop {
        let mut changed = false;
        for bid in domtree.rpo() {
            let block = &code.blocks[bid];
            let len = block.instructions.len();
            let mut uses = HashSet::new();
            if first_loop {
                // Register every phinode as a vertex; `Register::Zero` is the
                // sentinel for "no physical register assigned yet".
                for (i, dtype) in block.phinodes.iter().enumerate() {
                    let rid = ir::RegisterId::arg(*bid, i);
                    let _unused =
                        vertices.insert(rid, (dtype.deref().clone(), asm::Register::Zero));
                }
            }
            for (i, inst) in block.instructions.iter().enumerate() {
                if first_loop {
                    let rid = ir::RegisterId::temp(*bid, i);
                    let _unused =
                        vertices.insert(rid, (inst.dtype().clone(), asm::Register::Zero));
                }
                // Registers read by this instruction.
                match inst.deref() {
                    ir::Instruction::BinOp { lhs, rhs, .. } => {
                        mark_as_used(lhs, &mut uses);
                        mark_as_used(rhs, &mut uses);
                    }
                    ir::Instruction::UnaryOp { operand, .. } => {
                        mark_as_used(operand, &mut uses);
                    }
                    ir::Instruction::Store { ptr, value } => {
                        mark_as_used(ptr, &mut uses);
                        mark_as_used(value, &mut uses);
                    }
                    ir::Instruction::Load { ptr } => {
                        mark_as_used(ptr, &mut uses);
                    }
                    ir::Instruction::Call { callee, args, .. } => {
                        mark_as_used(callee, &mut uses);
                        for arg in args {
                            mark_as_used(arg, &mut uses);
                        }
                    }
                    ir::Instruction::TypeCast { value, .. } => {
                        mark_as_used(value, &mut uses);
                    }
                    ir::Instruction::GetElementPtr { ptr, offset, .. } => {
                        mark_as_used(ptr, &mut uses);
                        mark_as_used(offset, &mut uses);
                    }
                    _ => (),
                }
                // live(i) = uses(i) ∪ (live(i + 1) \ {def(i)}), where program
                // point `temp(bid, i)` means "just before instruction i".
                let mut next_lives = lives
                    .entry(ir::RegisterId::temp(*bid, i + 1))
                    .or_insert_with(HashSet::new)
                    .clone();
                let _ = next_lives.remove(&ir::RegisterId::temp(*bid, i));
                uses.extend(next_lives);
                changed = extend_set(
                    lives
                        .entry(ir::RegisterId::temp(*bid, i))
                        .or_insert_with(HashSet::new),
                    std::mem::take(&mut uses),
                ) || changed;
            }
            // Registers read by the block terminator, including the phinode
            // parameters of every jump target.
            let mut uses = HashSet::new();
            match &block.exit {
                ir::BlockExit::Jump { arg } => {
                    for arg in &arg.args {
                        mark_as_used(arg, &mut uses);
                    }
                    mark_as_used_phinodes(arg, code, &mut uses);
                }
                ir::BlockExit::ConditionalJump {
                    condition,
                    arg_then,
                    arg_else,
                } => {
                    mark_as_used(condition, &mut uses);
                    for arg in &arg_then.args {
                        mark_as_used(arg, &mut uses);
                    }
                    for arg in &arg_else.args {
                        mark_as_used(arg, &mut uses);
                    }
                    mark_as_used_phinodes(arg_then, code, &mut uses);
                    mark_as_used_phinodes(arg_else, code, &mut uses);
                }
                ir::BlockExit::Switch {
                    value,
                    default,
                    cases,
                } => {
                    mark_as_used(value, &mut uses);
                    for arg in &default.args {
                        mark_as_used(arg, &mut uses);
                    }
                    mark_as_used_phinodes(default, code, &mut uses);
                    for (_, arg) in cases {
                        for arg in &arg.args {
                            mark_as_used(arg, &mut uses);
                        }
                        mark_as_used_phinodes(arg, code, &mut uses);
                    }
                }
                ir::BlockExit::Return { value } => {
                    mark_as_used(value, &mut uses);
                }
                _ => (),
            }
            // The block's exit point also keeps alive whatever is live at the
            // entry of every successor block.
            let successors = cfg[bid].iter().map(|arg| arg.bid).collect::<Vec<_>>();
            let mut successor_lives = HashSet::<ir::RegisterId>::new();
            for succ in &successors {
                if let Some(succ_lives) = lives.get(&ir::RegisterId::temp(*succ, 0)) {
                    successor_lives.extend(succ_lives);
                }
            }
            uses.extend(successor_lives);
            changed = extend_set(
                lives
                    .entry(ir::RegisterId::temp(*bid, len))
                    .or_insert_with(HashSet::new),
                uses,
            ) || changed;
            // Phinodes are considered live at their block's entry point.
            let phinodes = (0..block.phinodes.len())
                .map(|i| ir::RegisterId::arg(*bid, i))
                .collect();
            changed = extend_set(
                lives
                    .entry(ir::RegisterId::temp(*bid, 0))
                    .or_insert_with(HashSet::new),
                phinodes,
            ) || changed;
        }
        if !changed {
            break;
        }
        first_loop = false;
    }
    // Two registers interfere iff they are live at the same program point;
    // edges are inserted in both orientations.
    let mut edges = HashSet::new();
    for lives in lives.values() {
        for rid1 in lives {
            for rid2 in lives {
                if rid1 != rid2 {
                    let _ = edges.insert((*rid1, *rid2));
                }
            }
        }
    }
    // Vertex degree, used to color high-pressure vertices first.
    let mut num_of_edges = HashMap::new();
    for (rid1, _) in &edges {
        *num_of_edges.entry(*rid1).or_insert(0) += 1;
    }
    let analysis = analyze_function(code, signature, structs);
    // Pre-color entry-block phinodes (function parameters) with their ABI
    // argument registers when they never have to survive a call.
    if !analysis.has_memcpy_in_prologue {
        for (aid, dtype) in code.blocks[&code.bid_init].phinodes.iter().enumerate() {
            let rid = ir::RegisterId::arg(code.bid_init, aid);
            if analysis.is_temporary2(&rid, &lives, true) {
                if is_integer(dtype) {
                    let (_, asm_reg) = vertices.get_mut(&rid).unwrap();
                    *asm_reg = asm::Register::arg(
                        asm::RegisterType::Integer,
                        analysis.primitive_arg_reg_index[&aid] as usize,
                    );
                } else if is_float(dtype) {
                    let (_, asm_reg) = vertices.get_mut(&rid).unwrap();
                    *asm_reg = asm::Register::arg(
                        asm::RegisterType::FloatingPoint,
                        analysis.primitive_arg_reg_index[&aid] as usize,
                    );
                }
            }
        }
    }
    // Pre-color call results with the ABI return register (a0/fa0) when safe.
    for (bid, iid, _, reg) in &analysis.calls {
        let rid = ir::RegisterId::temp(*bid, *iid);
        if analysis.is_temporary2(&rid, &lives, true) {
            if let Some(reg) = reg {
                let (_, asm_reg) = vertices.get_mut(&rid).unwrap();
                *asm_reg = *reg;
            }
        }
    }
    // Spill handling: at every over-pressured point (12+ live integer
    // registers), decide which registers fit in the usable temp/arg/saved
    // registers and spill the rest.
    let mut spilled = HashSet::new();
    let mut visited = HashSet::new();
    while let Some((loc, clique)) =
        find_large_integer_clique(&lives, &vertices, &spilled, &visited)
    {
        let _ = visited.insert(loc);
        // Already pre-colored registers are never spilled here.
        let mut not_spilled = clique
            .iter()
            .filter(|reg| vertices[*reg].1 != asm::Register::Zero)
            .cloned()
            .collect::<HashSet<_>>();
        let mut usable_temp_regs = 4 - clique
            .iter()
            .filter(|reg| matches!(vertices[*reg].1, asm::Register::Temp(_, _,)))
            .count();
        let mut usable_arg_regs = 8 - clique
            .iter()
            .filter(|reg| matches!(vertices[*reg].1, asm::Register::Arg(_, _,)))
            .count();
        let mut usable_saved_regs = 11
            - clique
                .iter()
                .filter(|reg| matches!(vertices[*reg].1, asm::Register::Saved(_, _,)))
                .count();
        for reg in &clique {
            if not_spilled.contains(reg) {
                continue;
            }
            if usable_temp_regs > 0 && analysis.is_temporary2(reg, &lives, false) {
                usable_temp_regs -= 1;
                let _ = not_spilled.insert(*reg);
            } else if usable_arg_regs > 0 && analysis.is_temporary2(reg, &lives, true) {
                usable_arg_regs -= 1;
                let _ = not_spilled.insert(*reg);
            } else if usable_saved_regs > 0 {
                usable_saved_regs -= 1;
                let _ = not_spilled.insert(*reg);
            }
        }
        for reg in clique.difference(&not_spilled) {
            let _ = spilled.insert(*reg);
        }
    }
    // Greedy coloring, highest-degree vertices first.
    let vertices_order = vertices
        .keys()
        .map(|rid| (*rid, num_of_edges.get(rid).cloned().unwrap_or_default()))
        .sorted_by(|(_, v1), (_, v2)| v2.cmp(v1));
    for (rid, count) in vertices_order {
        // Skip isolated, already-colored, and spilled vertices.
        if count == 0 || vertices[&rid].1 != asm::Register::Zero || spilled.contains(&rid) {
            continue;
        }
        let dtype = &vertices[&rid].0;
        let neighbors = edges
            .iter()
            .filter_map(|(r1, r2)| {
                if *r1 == rid {
                    Some(&vertices[r2])
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();
        // Physical registers already taken by neighbors of the same class.
        let neighbor_registers = neighbors
            .iter()
            .filter_map(|(_, reg)| {
                // TODO: use register classes other than Saved as well?
                if is_integer(dtype)
                    && matches!(
                        reg,
                        asm::Register::Saved(asm::RegisterType::Integer, _)
                            | asm::Register::Arg(asm::RegisterType::Integer, _)
                            | asm::Register::Temp(asm::RegisterType::Integer, _)
                    )
                {
                    return Some(*reg);
                }
                if is_float(dtype)
                    && matches!(
                        reg,
                        asm::Register::Saved(asm::RegisterType::FloatingPoint, _)
                            | asm::Register::Arg(asm::RegisterType::FloatingPoint, _)
                            | asm::Register::Temp(asm::RegisterType::FloatingPoint, _)
                    )
                {
                    return Some(*reg);
                }
                None
            })
            .collect::<HashSet<_>>();
        if is_integer(dtype) {
            let smallest_temp_reg = smallest_missing_integer(
                &neighbor_registers
                    .iter()
                    .filter_map(|reg| {
                        if let asm::Register::Temp(_, i) = reg {
                            Some(*i)
                        } else {
                            None
                        }
                    })
                    .collect(),
                3,
            ); // t0-t2 are reserved
            let smallest_arg_reg = smallest_missing_integer(
                &neighbor_registers
                    .iter()
                    .filter_map(|reg| {
                        if let asm::Register::Arg(_, i) = reg {
                            Some(*i)
                        } else {
                            None
                        }
                    })
                    .collect(),
                0,
            );
            let smallest_saved_reg = smallest_missing_integer(
                &neighbor_registers
                    .iter()
                    .filter_map(|reg| {
                        if let asm::Register::Saved(_, i) = reg {
                            Some(*i)
                        } else {
                            None
                        }
                    })
                    .collect(),
                1,
            ); // s0 is reserved (frame pointer)
            if smallest_temp_reg <= 6 && analysis.is_temporary2(&rid, &lives, false) {
                let _unused = vertices.insert(
                    rid,
                    (
                        dtype.clone(),
                        asm::Register::temp(asm::RegisterType::Integer, smallest_temp_reg),
                    ),
                );
            } else if smallest_arg_reg <= 7 && analysis.is_temporary2(&rid, &lives, true) {
                let _unused = vertices.insert(
                    rid,
                    (
                        dtype.clone(),
                        asm::Register::arg(asm::RegisterType::Integer, smallest_arg_reg),
                    ),
                );
            } else if smallest_saved_reg <= 11 {
                let _unused = vertices.insert(
                    rid,
                    (
                        dtype.clone(),
                        asm::Register::saved(asm::RegisterType::Integer, smallest_saved_reg),
                    ),
                );
            } else {
                // Spilling
            }
        } else if is_float(dtype) {
            let smallest_temp_reg = smallest_missing_integer(
                &neighbor_registers
                    .iter()
                    .filter_map(|reg| {
                        if let asm::Register::Temp(_, i) = reg {
                            Some(*i)
                        } else {
                            None
                        }
                    })
                    .collect(),
                2,
            ); // ft0-ft1 are reserved
            let smallest_arg_reg = smallest_missing_integer(
                &neighbor_registers
                    .iter()
                    .filter_map(|reg| {
                        if let asm::Register::Arg(_, i) = reg {
                            Some(*i)
                        } else {
                            None
                        }
                    })
                    .collect(),
                0,
            );
            let smallest_saved_reg = smallest_missing_integer(
                &neighbor_registers
                    .iter()
                    .filter_map(|reg| {
                        if let asm::Register::Saved(_, i) = reg {
                            Some(*i)
                        } else {
                            None
                        }
                    })
                    .collect(),
                0,
            );
            if smallest_temp_reg <= 11 && analysis.is_temporary2(&rid, &lives, false) {
                let _unused = vertices.insert(
                    rid,
                    (
                        dtype.clone(),
                        asm::Register::temp(
                            asm::RegisterType::FloatingPoint,
                            smallest_temp_reg,
                        ),
                    ),
                );
            } else if smallest_arg_reg <= 7 && analysis.is_temporary2(&rid, &lives, true) {
                let _unused = vertices.insert(
                    rid,
                    (
                        dtype.clone(),
                        asm::Register::arg(asm::RegisterType::FloatingPoint, smallest_arg_reg),
                    ),
                );
            } else if smallest_saved_reg <= 11 {
                let _unused = vertices.insert(
                    rid,
                    (
                        dtype.clone(),
                        asm::Register::saved(
                            asm::RegisterType::FloatingPoint,
                            smallest_saved_reg,
                        ),
                    ),
                );
            } else {
                // Spilling
            }
        } else {
            // TODO: non-primitive dtype — needs spilling or register splitting
        }
    }
    InferenceGraph {
        edges,
        vertices,
        analysis,
        lives,
    }
}
/// Returns the physical register assigned to `rid`, or `None` when none was
/// allocated (`Register::Zero` is the "unallocated" sentinel).
fn get_register(&self, rid: &ir::RegisterId) -> Option<asm::Register> {
    match self.vertices[rid].1 {
        asm::Register::Zero => None,
        reg => Some(reg),
    }
}
}
/// Records the register read by `operand` into `uses`. Constants (non-register
/// operands) and stack-allocated `Local` registers are ignored.
fn mark_as_used(operand: &ir::Operand, uses: &mut HashSet<ir::RegisterId>) {
    let ir::Operand::Register { rid, .. } = operand else {
        return;
    };
    match rid {
        ir::RegisterId::Local { .. } => {}
        _ => {
            let _ = uses.insert(*rid);
        }
    }
}
/// Marks every phinode parameter of the jump target `arg.bid` as used.
fn mark_as_used_phinodes(
    arg: &ir::JumpArg,
    code: &ir::FunctionDefinition,
    uses: &mut HashSet<ir::RegisterId>,
) {
    let num_phinodes = code.blocks[&arg.bid].phinodes.len();
    uses.extend((0..num_phinodes).map(|aid| ir::RegisterId::Arg { bid: arg.bid, aid }));
}
/// Inserts every element of `other` into `set`, returning `true` iff `set`
/// actually grew. Generic over the element type (backward compatible with the
/// previous `HashSet<ir::RegisterId>`-only signature).
fn extend_set<T: Eq + Hash>(set: &mut HashSet<T>, other: HashSet<T>) -> bool {
    let before = set.len();
    set.extend(other);
    set.len() != before
}
/// Returns the smallest integer `>= start` that is not contained in `set`.
fn smallest_missing_integer(set: &HashSet<usize>, start: usize) -> usize {
    (start..)
        .find(|candidate| !set.contains(candidate))
        .expect("a finite set cannot contain every integer >= start")
}
/// Finds the most pressured program point where 12 or more not-yet-spilled
/// integer registers are simultaneously live, skipping points in `visited`.
///
/// Returns the point together with the set of its live, unspilled integer
/// registers, or `None` when no such point remains.
fn find_large_integer_clique(
    lives: &HashMap<ir::RegisterId, HashSet<ir::RegisterId>>,
    vertices: &HashMap<ir::RegisterId, (ir::Dtype, asm::Register)>,
    spilled: &HashSet<ir::RegisterId>,
    visited: &HashSet<ir::RegisterId>,
) -> Option<(ir::RegisterId, HashSet<ir::RegisterId>)> {
    lives
        .iter()
        .filter_map(|(loc, regs)| {
            let count = regs
                .iter()
                .filter(|reg| is_integer(&vertices[*reg].0) && !spilled.contains(*reg))
                .count();
            if !visited.contains(loc) && count >= 12 {
                Some((loc, regs, count))
            } else {
                None
            }
        })
        // Highest register pressure first; `.next()` picks the maximum
        // (the previous `.take(1).next()` was redundant).
        .sorted_by(|(_, _, v1), (_, _, v2)| v2.cmp(v1))
        .next()
        .map(|(loc, regs, _)| {
            (
                *loc,
                regs.iter()
                    .filter(|reg| is_integer(&vertices[*reg].0) && !spilled.contains(*reg))
                    .cloned()
                    .collect::<HashSet<_>>(),
            )
        })
}
/// Per-function facts gathered by `analyze_function` before register
/// allocation.
struct Analysis {
    // Number of arguments passed in integer registers.
    num_int_args: i32,
    // Number of arguments passed in floating-point registers.
    num_float_args: i32,
    // Parameter index -> index of the argument register it occupies
    // (computed by `get_number_of_register_arguments`; primitives only).
    primitive_arg_reg_index: HashMap<usize, i32>,
    // True when the return value is a struct larger than 16 bytes, returned
    // through a pointer passed in a0.
    is_a0_return_pointer: bool,
    // True when some parameter is a struct larger than 16 bytes, requiring a
    // memcpy call in the prologue.
    has_memcpy_in_prologue: bool,
    // Call-like sites: (block, instruction index, registers read by the call,
    // ABI register holding a primitive return value, if any). Struct
    // loads/stores lowered to memcpy are included with an empty register set.
    calls: Vec<(
        ir::BlockId,
        usize,
        HashSet<ir::RegisterId>,
        Option<asm::Register>,
    )>,
    // True when the function performs any call (including prologue memcpy),
    // i.e. `ra` must be saved.
    has_call: bool,
}
impl Analysis {
    /// Returns whether `reg` may live in a caller-saved register: true iff it
    /// never needs to survive across a call site.
    ///
    /// With `is_a_reg` set, a register that is merely passed as a call
    /// argument (consumed by the call) also disqualifies — argument registers
    /// are clobbered by the call itself.
    fn is_temporary2(
        &self,
        reg: &ir::RegisterId,
        lives: &HashMap<ir::RegisterId, HashSet<ir::RegisterId>>,
        is_a_reg: bool,
    ) -> bool {
        !self.calls.iter().any(|(call_bid, call_iid, call_args, _)| {
            // Live sets just before and just after the call instruction.
            let lives_before = lives
                .get(&ir::RegisterId::temp(*call_bid, *call_iid))
                .unwrap();
            let lives_after = lives
                .get(&ir::RegisterId::temp(*call_bid, call_iid + 1))
                .unwrap();
            lives_before.contains(reg)
                && ((is_a_reg && call_args.contains(reg)) || lives_after.contains(reg))
        })
    }
}
/// Gathers per-function facts used by register allocation and prologue
/// emission: argument-register usage, whether the return value travels through
/// a hidden pointer in a0, and every instruction that behaves like a call
/// (explicit calls plus struct loads/stores lowered to memcpy).
fn analyze_function(
    code: &ir::FunctionDefinition,
    signature: &ir::FunctionSignature,
    structs: &HashMap<String, Option<ir::Dtype>>,
) -> Analysis {
    let (num_int_args, num_float_args, primitive_arg_reg_index) =
        get_number_of_register_arguments(&signature.ret, &signature.params, structs);
    // Structs larger than 16 bytes are returned via a pointer passed in a0.
    let is_a0_return_pointer = is_struct(&signature.ret, structs).is_some_and(|size| size > 16);
    let mut calls = Vec::new();
    for (bid, block) in &code.blocks {
        for (iid, inst) in block.instructions.iter().enumerate() {
            match inst.deref() {
                ir::Instruction::Call { callee, args, .. } => {
                    // Record which registers the call reads, plus the ABI
                    // register (a0/fa0) a primitive result arrives in.
                    let mut used = HashSet::new();
                    mark_as_used(callee, &mut used);
                    for arg in args {
                        mark_as_used(arg, &mut used);
                    }
                    // Callee is a pointer-to-function; extract its return type.
                    let return_type = callee
                        .dtype()
                        .get_pointer_inner()
                        .unwrap()
                        .get_function_inner()
                        .unwrap()
                        .0
                        .clone();
                    let ret_asm_reg = if is_integer(&return_type) {
                        Some(asm::Register::A0)
                    } else if is_float(&return_type) {
                        Some(asm::Register::FA0)
                    } else {
                        None
                    };
                    calls.push((*bid, iid, used, ret_asm_reg));
                }
                ir::Instruction::Store { value, .. } => {
                    // Struct stores are lowered to a memcpy call, which
                    // clobbers caller-saved registers like any other call.
                    if is_struct(&value.dtype(), structs).is_some() {
                        calls.push((*bid, iid, HashSet::new(), None)); // memcpy
                    }
                }
                ir::Instruction::Load { ptr } => {
                    // Struct loads are lowered to a memcpy call as well.
                    if is_struct(ptr.dtype().get_pointer_inner().unwrap(), structs).is_some() {
                        calls.push((*bid, iid, HashSet::new(), None)); // memcpy
                    }
                }
                _ => (),
            }
        }
    }
    // Large (> 16 bytes) struct parameters are copied with memcpy in the
    // prologue.
    let has_memcpy_in_prologue = signature.params.iter().any(|param| {
        if let Some(size) = is_struct(param, structs) {
            size > 16
        } else {
            false
        }
    });
    let has_call = has_memcpy_in_prologue || !calls.is_empty();
    Analysis {
        num_int_args,
        num_float_args,
        primitive_arg_reg_index,
        is_a0_return_pointer,
        has_memcpy_in_prologue,
        calls,
        has_call,
    }
}
/// Per-function state threaded through block translation.
struct Context {
    // Instructions emitted for the block currently being translated.
    insts: Vec<asm::Instruction>,
    // s0-relative frame offset of each spilled IR register / local allocation,
    // stored as a two's-complement u64 (slot start minus total frame size).
    stack_offsets: HashMap<ir::RegisterId, u64>,
    // Total frame size in bytes; 0 when no frame is needed, otherwise a
    // multiple of 16.
    stack_allocation: u64,
    // Auxiliary blocks synthesized while translating a block (label + body);
    // they are appended directly after that block.
    new_blocks: Vec<(asm::Label, Vec<asm::Instruction>)>,
    // Register-allocation results for the function.
    inference_graph: InferenceGraph,
    // Byte offset below s0 where the callee-saved register save area begins
    // (8 for s0, +8 if ra is saved, +8 if a0 is saved).
    saved_reg_offset: usize,
}
impl Asmgen {
fn translate_decl(
&mut self,
name: &String,
decl: &ir::Declaration,
structs: &HashMap<String, Option<ir::Dtype>>,
) {
match decl {
ir::Declaration::Variable { dtype, initializer } => {
if let Some(initializer) = initializer {
let mut directives = initializer_to_directives(initializer);
if let ir::Dtype::Array { inner, size } = dtype {
if *size != directives.len() {
directives.push(asm::Directive::Zero(
(size - directives.len())
* (get_dtype_size(inner, structs) as usize),
));
}
}
self.variables.push(asm::Section::new(
vec![
asm::Directive::Globl(asm::Label(name.clone())),
asm::Directive::Section(asm::SectionType::Data),
asm::Directive::Type(asm::Label(name.clone()), asm::SymbolType::Object),
],
asm::Variable::new(asm::Label(name.clone()), directives),
));
} else {
let mut directives = Vec::new();
match dtype {
ir::Dtype::Array { inner, size } => directives.push(asm::Directive::Zero(
size * (get_dtype_size(inner, structs) as usize),
)),
_ => directives
.push(asm::Directive::Zero(get_dtype_size(dtype, structs) as usize)),
}
self.variables.push(asm::Section::new(
vec![
asm::Directive::Globl(asm::Label(name.clone())),
asm::Directive::Section(asm::SectionType::Data),
asm::Directive::Type(asm::Label(name.clone()), asm::SymbolType::Object),
],
asm::Variable::new(asm::Label(name.clone()), directives),
));
}
}
ir::Declaration::Function {
signature,
definition,
} => {
if let Some(definition) = definition {
let graph = InferenceGraph::new(definition, signature, structs);
// println!("{name} asdf: {:?}", graph.vertices);
let mut context =
self.translate_prologue(signature, definition, structs, graph);
self.translate_block(
name,
definition.bid_init,
&definition.blocks[&definition.bid_init],
&mut context,
structs,
);
let mut blocks = vec![asm::Block::new(
Some(asm::Label(name.clone())),
context.insts,
)];
for (bid, block) in &definition.blocks {
if *bid == definition.bid_init {
continue;
}
context.insts = Vec::new();
self.translate_block(name, *bid, block, &mut context, structs);
blocks.push(asm::Block::new(
Some(asm::Label::new(name, *bid)),
context.insts,
));
for (label, insts) in std::mem::take(&mut context.new_blocks) {
blocks.push(asm::Block::new(Some(label), insts));
}
}
self.functions.push(asm::Section::new(
vec![
asm::Directive::Globl(asm::Label(name.clone())),
asm::Directive::Section(asm::SectionType::Text),
asm::Directive::Type(
asm::Label(name.clone()),
asm::SymbolType::Function,
),
],
asm::Function::new(blocks),
))
}
}
}
}
/// Computes the stack frame layout and emits the function prologue.
///
/// The prologue: adjusts sp, saves s0 (and ra / incoming a0 when needed),
/// backs up callee-saved registers, then moves each incoming argument either
/// into its allocated register or into its stack slot, and finally memcpy's
/// large struct arguments into the frame.
///
/// Note on offsets: unsigned negation is written as `!x + 1` (two's
/// complement) throughout, since offsets are carried in `u64`.
fn translate_prologue(
    &mut self,
    signature: &ir::FunctionSignature,
    definition: &ir::FunctionDefinition,
    structs: &HashMap<String, Option<ir::Dtype>>,
    inference_graph: InferenceGraph,
) -> Context {
    let mut stack_allocation = 0;
    let mut stack_offsets = HashMap::new();
    // Reserve a 16-byte-aligned slot for every IR register that did not get a
    // physical register (spilled phinodes and instruction results).
    for (bid, block) in &definition.blocks {
        for (aid, dtype) in block.phinodes.iter().enumerate() {
            let rid = ir::RegisterId::arg(*bid, aid);
            if inference_graph.get_register(&rid).is_none() {
                let _ = stack_offsets.insert(rid, stack_allocation);
                stack_allocation += ceil_to_multiple_of_16(get_dtype_size(dtype, structs));
            }
        }
        for (iid, inst) in block.instructions.iter().enumerate() {
            let rid = ir::RegisterId::temp(*bid, iid);
            if inference_graph.get_register(&rid).is_none() {
                let _ = stack_offsets.insert(rid, stack_allocation);
                stack_allocation +=
                    ceil_to_multiple_of_16(get_dtype_size(&inst.dtype(), structs));
            }
        }
    }
    // Slots for local (alloca-style) allocations.
    for (aid, dtype) in definition.allocations.iter().enumerate() {
        let _ = stack_offsets.insert(ir::RegisterId::local(aid), stack_allocation);
        stack_allocation += ceil_to_multiple_of_16(get_dtype_size(dtype, structs));
    }
    stack_allocation += 8; // s0
    // `saved_reg_offset` tracks how far below s0 the callee-saved area starts.
    let mut saved_reg_offset = 8;
    if inference_graph.analysis.has_call {
        stack_allocation += 8; // ra
        saved_reg_offset += 8;
    }
    if inference_graph.analysis.is_a0_return_pointer {
        stack_allocation += 8; // a0
        saved_reg_offset += 8;
    }
    // Count distinct callee-saved registers handed out by allocation; each one
    // needs a save slot.
    let num_int_saved_regs = inference_graph
        .vertices
        .values()
        .filter_map(|(_, reg)| {
            if let asm::Register::Saved(asm::RegisterType::Integer, i) = reg {
                Some(i)
            } else {
                None
            }
        })
        .collect::<HashSet<_>>()
        .len();
    let num_float_saved_regs = inference_graph
        .vertices
        .values()
        .filter_map(|(_, reg)| {
            if let asm::Register::Saved(asm::RegisterType::FloatingPoint, i) = reg {
                Some(i)
            } else {
                None
            }
        })
        .collect::<HashSet<_>>()
        .len();
    stack_allocation += ((num_int_saved_regs + num_float_saved_regs) * 8) as u64;
    if stack_allocation == 8 {
        // Only s0: No Spill
        stack_allocation = 0;
    } else if stack_allocation % 16 != 0 {
        // The stack must stay 16-byte aligned.
        stack_allocation += 16 - (stack_allocation % 16);
    }
    // Rebase slot offsets so they are relative to s0 (= sp + frame size);
    // wrapping_sub yields the negative offset in two's complement.
    for offset in stack_offsets.values_mut() {
        *offset = offset.wrapping_sub(stack_allocation);
    }
    let mut insts = vec![];
    if stack_allocation != 0 {
        // sp -= frame size; save old s0 at the frame top; s0 = old sp.
        self.translate_addi(
            asm::Register::Sp,
            asm::Register::Sp,
            !stack_allocation + 1,
            &mut insts,
        );
        self.translate_store(
            asm::SType::SD,
            asm::Register::Sp,
            asm::Register::S0,
            stack_allocation - 8,
            &mut insts,
        );
        self.translate_addi(
            asm::Register::S0,
            asm::Register::Sp,
            stack_allocation,
            &mut insts,
        );
        // ra lives at s0 - 16 when the function makes any call.
        if inference_graph.analysis.has_call {
            insts.push(asm::Instruction::SType {
                instr: asm::SType::SD,
                rs1: asm::Register::S0,
                rs2: asm::Register::Ra,
                imm: asm::Immediate::Value(!16 + 1),
            });
        }
        // The hidden return-struct pointer (a0) is preserved at s0 - 24.
        if inference_graph.analysis.is_a0_return_pointer {
            insts.push(asm::Instruction::SType {
                instr: asm::SType::SD,
                rs1: asm::Register::S0,
                rs2: asm::Register::A0,
                imm: asm::Immediate::Value(!24 + 1),
            });
        }
        // Back up the s1.. and fs0.. registers.
        for i in 0..num_int_saved_regs {
            insts.push(asm::Instruction::SType {
                instr: asm::SType::SD,
                rs1: asm::Register::S0,
                rs2: asm::Register::saved(
                    asm::RegisterType::Integer,
                    i + 1, // s0 is not used for this purpose
                ),
                imm: asm::Immediate::Value((!(saved_reg_offset + (i + 1) * 8) + 1) as u64),
            });
        }
        for i in 0..num_float_saved_regs {
            insts.push(asm::Instruction::SType {
                instr: asm::SType::store(ir::Dtype::DOUBLE),
                rs1: asm::Register::S0,
                rs2: asm::Register::saved(asm::RegisterType::FloatingPoint, i),
                imm: asm::Immediate::Value(
                    (!(saved_reg_offset + num_int_saved_regs * 8 + (i + 1) * 8) + 1) as u64,
                ),
            });
        }
    }
    // Move each incoming argument from its ABI register to where the function
    // body expects it (allocated register or stack slot).
    let mut num_int_args = 0;
    let mut num_float_args = 0;
    if let Some(size) = is_struct(&signature.ret, structs) {
        if size > 16 {
            // a0 carries the hidden return pointer, shifting the arguments.
            num_int_args += 1;
        }
    }
    // Parameter index -> argument-register index, for struct args > 16 bytes
    // (they are passed by reference and copied below).
    let mut large_struct = HashMap::new();
    for (i, dtype) in signature.params.iter().enumerate() {
        let rid = ir::RegisterId::arg(definition.bid_init, i);
        let rd = inference_graph.get_register(&rid);
        if is_integer(dtype) {
            let rs = asm::Register::arg(asm::RegisterType::Integer, num_int_args);
            num_int_args += 1;
            if let Some(rd) = rd {
                if rd != rs {
                    insts.push(asm::Instruction::Pseudo(asm::Pseudo::Mv { rd, rs }));
                }
            } else {
                // Spilled argument: store it into its frame slot.
                self.translate_store(
                    asm::SType::store(dtype.clone()),
                    asm::Register::S0,
                    rs,
                    stack_offsets[&rid],
                    &mut insts,
                );
            }
        } else if is_float(dtype) {
            let rs = asm::Register::arg(asm::RegisterType::FloatingPoint, num_float_args);
            num_float_args += 1;
            if let Some(rd) = rd {
                if rd != rs {
                    insts.push(asm::Instruction::Pseudo(asm::Pseudo::Fmv {
                        data_size: asm::DataSize::try_from(dtype.clone()).unwrap(),
                        rd,
                        rs,
                    }));
                }
            } else {
                self.translate_store(
                    asm::SType::store(dtype.clone()),
                    asm::Register::S0,
                    rs,
                    stack_offsets[&rid],
                    &mut insts,
                );
            }
        } else if let Some(size) = is_struct(dtype, structs) {
            if size > 16 {
                // Passed by reference; the copy happens after this loop.
                let _ = large_struct.insert(i, num_int_args);
                num_int_args += 1;
            } else {
                // Small struct passed by value in registers: write each field
                // to the struct's stack slot. Consecutive integer fields are
                // packed into one 8-byte integer register; each float field
                // uses its own FP register.
                let stack_offset = stack_offsets[&rid];
                let (struct_dtype, fields) = get_struct_dtype(dtype, structs);
                let mut is_packing = false;
                let mut packing_start_offset = 0;
                let mut packing_size = 0;
                for field_dtype in fields {
                    let (offset, _) = struct_dtype
                        .get_offset_struct_field(field_dtype.name().unwrap(), structs)
                        .unwrap();
                    let field_size = get_dtype_size(field_dtype, structs);
                    if is_integer(field_dtype) {
                        if !is_packing {
                            // Start a new integer pack.
                            is_packing = true;
                            packing_start_offset = offset;
                            packing_size = field_size;
                        } else if offset == packing_start_offset + (packing_size as usize)
                            && packing_size + field_size <= 8
                        {
                            // Contiguous and still fits in 8 bytes: keep packing.
                            packing_size += field_size;
                        } else {
                            // Flush the current pack and start a new one.
                            self.translate_addi(
                                asm::Register::T2,
                                asm::Register::S0,
                                stack_offset,
                                &mut insts,
                            );
                            insts.push(asm::Instruction::SType {
                                instr: asm::SType::SD,
                                rs1: asm::Register::T2,
                                rs2: asm::Register::arg(
                                    asm::RegisterType::Integer,
                                    num_int_args,
                                ),
                                imm: asm::Immediate::Value(packing_start_offset as u64),
                            });
                            num_int_args += 1;
                            packing_start_offset = offset;
                            packing_size = field_size;
                        }
                    } else if is_float(field_dtype) {
                        // A float field terminates any pending integer pack.
                        if is_packing {
                            self.translate_addi(
                                asm::Register::T2,
                                asm::Register::S0,
                                stack_offset,
                                &mut insts,
                            );
                            insts.push(asm::Instruction::SType {
                                instr: asm::SType::SD,
                                rs1: asm::Register::T2,
                                rs2: asm::Register::arg(
                                    asm::RegisterType::Integer,
                                    num_int_args,
                                ),
                                imm: asm::Immediate::Value(packing_start_offset as u64),
                            });
                            num_int_args += 1;
                            is_packing = false;
                        }
                        self.translate_addi(
                            asm::Register::T2,
                            asm::Register::S0,
                            stack_offset,
                            &mut insts,
                        );
                        insts.push(asm::Instruction::SType {
                            instr: asm::SType::store(field_dtype.deref().clone()),
                            rs1: asm::Register::T2,
                            rs2: asm::Register::arg(
                                asm::RegisterType::FloatingPoint,
                                num_float_args,
                            ),
                            imm: asm::Immediate::Value(offset as u64),
                        });
                        num_float_args += 1;
                    } else {
                        todo!()
                    }
                }
                // Flush the trailing integer pack, if any.
                if is_packing {
                    self.translate_addi(
                        asm::Register::T2,
                        asm::Register::S0,
                        stack_offset,
                        &mut insts,
                    );
                    insts.push(asm::Instruction::SType {
                        instr: asm::SType::SD,
                        rs1: asm::Register::T2,
                        rs2: asm::Register::arg(asm::RegisterType::Integer, num_int_args),
                        imm: asm::Immediate::Value(packing_start_offset as u64),
                    });
                    num_int_args += 1;
                }
            }
        }
    }
    // Copy each large struct argument into its frame slot with memcpy. The
    // memcpy call clobbers a0-a2, so a1/a2 are parked in a temporary 16-byte
    // sp area first; sources still in a3.. are read directly.
    if !large_struct.is_empty() {
        insts.push(asm::Instruction::IType {
            instr: asm::IType::ADDI,
            rd: asm::Register::Sp,
            rs1: asm::Register::Sp,
            imm: asm::Immediate::Value(!16 + 1),
        });
        insts.push(asm::Instruction::SType {
            instr: asm::SType::SD,
            rs1: asm::Register::Sp,
            rs2: asm::Register::A1,
            imm: asm::Immediate::Value(0),
        });
        insts.push(asm::Instruction::SType {
            instr: asm::SType::SD,
            rs1: asm::Register::Sp,
            rs2: asm::Register::A2,
            imm: asm::Immediate::Value(8),
        });
        for (i, dtype) in signature.params.iter().enumerate() {
            if let Some(size) = is_struct(dtype, structs) {
                let stack_offset = stack_offsets[&ir::RegisterId::arg(definition.bid_init, i)];
                if size > 16 {
                    let reg_index = large_struct[&i];
                    // memcpy(dest = frame slot, src = incoming pointer, size).
                    self.translate_addi(
                        asm::Register::A0,
                        asm::Register::S0,
                        stack_offset,
                        &mut insts,
                    );
                    match reg_index {
                        // a0 itself was saved at s0 - 24 earlier.
                        0 => insts.push(asm::Instruction::IType {
                            instr: asm::IType::LD,
                            rd: asm::Register::A1,
                            rs1: asm::Register::S0,
                            imm: asm::Immediate::Value(!24 + 1),
                        }),
                        // a1/a2 were parked on the sp scratch area above.
                        1 => insts.push(asm::Instruction::IType {
                            instr: asm::IType::LD,
                            rd: asm::Register::A1,
                            rs1: asm::Register::Sp,
                            imm: asm::Immediate::Value(0),
                        }),
                        2 => insts.push(asm::Instruction::IType {
                            instr: asm::IType::LD,
                            rd: asm::Register::A1,
                            rs1: asm::Register::Sp,
                            imm: asm::Immediate::Value(8),
                        }),
                        _ => insts.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
                            rd: asm::Register::A1,
                            rs: asm::Register::arg(asm::RegisterType::Integer, reg_index),
                        })),
                    }
                    insts.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
                        rd: asm::Register::A2,
                        imm: size,
                    }));
                    insts.push(asm::Instruction::Pseudo(asm::Pseudo::Call {
                        offset: asm::Label(String::from("memcpy")),
                    }));
                }
            }
        }
        // Release the 16-byte scratch area.
        insts.push(asm::Instruction::IType {
            instr: asm::IType::ADDI,
            rd: asm::Register::Sp,
            rs1: asm::Register::Sp,
            imm: asm::Immediate::Value(16),
        });
    }
    Context {
        insts,
        stack_offsets,
        stack_allocation,
        new_blocks: Vec::new(),
        inference_graph,
        saved_reg_offset,
    }
}
/// Emits the function epilogue: restores callee-saved registers, ra and s0
/// (mirroring the prologue's save order and offsets), pops the frame, and
/// returns. When `stack_allocation` is 0 no frame was set up, so only `ret`
/// is emitted.
fn translate_epilogue(&mut self, context: &mut Context) {
    if context.stack_allocation != 0 {
        // Restore the s1.. and fs0.. registers. The counts are recomputed
        // from the allocation result exactly as in the prologue, so the
        // offsets match the save slots.
        let num_int_regs = context
            .inference_graph
            .vertices
            .values()
            .filter_map(|(_, reg)| {
                if let asm::Register::Saved(asm::RegisterType::Integer, i) = reg {
                    Some(*i)
                } else {
                    None
                }
            })
            .collect::<HashSet<_>>()
            .len();
        let num_float_regs = context
            .inference_graph
            .vertices
            .values()
            .filter_map(|(_, reg)| {
                if let asm::Register::Saved(asm::RegisterType::FloatingPoint, i) = reg {
                    Some(*i)
                } else {
                    None
                }
            })
            .collect::<HashSet<_>>()
            .len();
        for i in 0..num_float_regs {
            context.insts.push(asm::Instruction::IType {
                instr: asm::IType::load(ir::Dtype::DOUBLE),
                rd: asm::Register::saved(asm::RegisterType::FloatingPoint, i),
                rs1: asm::Register::S0,
                imm: asm::Immediate::Value(
                    // `!x + 1` = two's-complement negation of the u64 offset.
                    (!(context.saved_reg_offset + num_int_regs * 8 + (i + 1) * 8) + 1) as u64,
                ),
            });
        }
        for i in 0..num_int_regs {
            context.insts.push(asm::Instruction::IType {
                instr: asm::IType::LD,
                rs1: asm::Register::S0,
                rd: asm::Register::saved(asm::RegisterType::Integer, i + 1), // s0 is not used for this purpose
                imm: asm::Immediate::Value(
                    (!(context.saved_reg_offset + (i + 1) * 8) + 1) as u64,
                ),
            });
        }
        // ra was saved at s0 - 16 by the prologue when the function calls.
        if context.inference_graph.analysis.has_call {
            context.insts.push(asm::Instruction::IType {
                instr: asm::IType::LD,
                rd: asm::Register::Ra,
                rs1: asm::Register::S0,
                imm: asm::Immediate::Value(!16 + 1),
            });
        }
        // Restore the caller's s0 from the frame top, then pop the frame.
        self.translate_load(
            asm::IType::LD,
            asm::Register::S0,
            asm::Register::Sp,
            context.stack_allocation - 8,
            &mut context.insts,
        );
        self.translate_addi(
            asm::Register::Sp,
            asm::Register::Sp,
            context.stack_allocation,
            &mut context.insts,
        );
    }
    context
        .insts
        .push(asm::Instruction::Pseudo(asm::Pseudo::Ret));
}
fn translate_block(
&mut self,
name: &String,
bid: ir::BlockId,
block: &ir::Block,
context: &mut Context,
structs: &HashMap<String, Option<ir::Dtype>>,
) {
for (iid, inst) in block.instructions.iter().enumerate() {
let rid = ir::RegisterId::temp(bid, iid);
let rd = context.inference_graph.get_register(&rid);
let is_spilled = rd.is_none();
match inst.deref() {
ir::Instruction::Nop => (),
ir::Instruction::BinOp {
op,
lhs,
rhs,
dtype,
} => {
let org_operand_dtype = lhs.dtype();
let operand_dtype = upgrade_dtype(&org_operand_dtype);
let rs1 =
self.translate_load_operand(lhs, get_lhs_register(&operand_dtype), context);
let rd = rd.unwrap_or(get_res_register(dtype));
if let Some(ir::Constant::Int { value, .. }) = rhs.get_constant() {
let mut imm_mode = false;
let data_size = asm::DataSize::try_from(operand_dtype.clone()).unwrap();
match op {
ast::BinaryOperator::Plus
| ast::BinaryOperator::BitwiseAnd
| ast::BinaryOperator::BitwiseOr
| ast::BinaryOperator::BitwiseXor => {
if (-2048..=2047).contains(&(*value as i128)) {
context.insts.push(asm::Instruction::IType {
instr: match op {
ast::BinaryOperator::Plus => {
asm::IType::Addi(data_size)
}
ast::BinaryOperator::BitwiseAnd => asm::IType::Andi,
ast::BinaryOperator::BitwiseOr => asm::IType::Ori,
ast::BinaryOperator::BitwiseXor => asm::IType::Xori,
_ => unreachable!(),
},
rd,
rs1,
imm: asm::Immediate::Value(*value as u64),
});
imm_mode = true;
}
}
ast::BinaryOperator::Minus => {
if (-2047..=2048).contains(&(*value as i128)) {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::Addi(data_size),
rd,
rs1,
imm: asm::Immediate::Value((!value + 1) as u64),
});
imm_mode = true;
}
}
ast::BinaryOperator::ShiftLeft => {
if (-2048..=2047).contains(&(*value as i128)) {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::Slli(data_size),
rd,
rs1,
imm: asm::Immediate::Value(*value as u64),
});
imm_mode = true;
}
}
ast::BinaryOperator::ShiftRight => {
if (-2048..=2047).contains(&(*value as i128)) {
context.insts.push(asm::Instruction::IType {
instr: if operand_dtype.is_int_signed() {
asm::IType::Srai(data_size)
} else {
asm::IType::Srli(data_size)
},
rd,
rs1,
imm: asm::Immediate::Value(*value as u64),
});
imm_mode = true;
}
}
ast::BinaryOperator::Less => {
if (-2048..=2047).contains(&(*value as i128)) {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::Slti {
is_signed: operand_dtype.is_int_signed(),
},
rd,
rs1,
imm: asm::Immediate::Value(*value as u64),
});
imm_mode = true;
}
}
ast::BinaryOperator::GreaterOrEqual => {
if (-2048..=2047).contains(&(*value as i128)) {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::Slti {
is_signed: operand_dtype.is_int_signed(),
},
rd,
rs1,
imm: asm::Immediate::Value(*value as u64),
});
context.insts.push(asm::Instruction::Pseudo(
asm::Pseudo::Seqz { rd, rs: rd },
));
imm_mode = true;
}
}
ast::BinaryOperator::Equals => {
if (-2048..=2047).contains(&(*value as i128)) {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::Xori,
rd,
rs1,
imm: asm::Immediate::Value(*value as u64),
});
context.insts.push(asm::Instruction::Pseudo(
asm::Pseudo::Seqz { rd, rs: rd },
));
imm_mode = true;
}
}
ast::BinaryOperator::NotEquals => {
if (-2048..=2047).contains(&(*value as i128)) {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::Xori,
rd,
rs1,
imm: asm::Immediate::Value(*value as u64),
});
context.insts.push(asm::Instruction::Pseudo(
asm::Pseudo::Snez { rd, rs: rd },
));
imm_mode = true;
}
}
_ => (),
}
if imm_mode {
if is_spilled {
self.translate_store_result(&rid, dtype.clone(), rd, context);
}
continue;
}
}
let rs2 =
self.translate_load_operand(rhs, get_rhs_register(&operand_dtype), context);
match op {
ast::BinaryOperator::Multiply => {
context.insts.push(asm::Instruction::RType {
instr: if is_integer(&operand_dtype) {
asm::RType::mul(operand_dtype)
} else {
asm::RType::fmul(operand_dtype)
},
rd,
rs1,
rs2: Some(rs2),
});
}
ast::BinaryOperator::Divide => {
context.insts.push(asm::Instruction::RType {
instr: if is_integer(&operand_dtype) {
let is_signed = operand_dtype.is_int_signed();
asm::RType::div(operand_dtype, is_signed)
} else {
asm::RType::fdiv(operand_dtype)
},
rd,
rs1,
rs2: Some(rs2),
});
}
ast::BinaryOperator::Modulo => {
let is_signed = operand_dtype.is_int_signed();
context.insts.push(asm::Instruction::RType {
instr: asm::RType::rem(operand_dtype, is_signed),
rd,
rs1,
rs2: Some(rs2),
});
}
ast::BinaryOperator::Plus => context.insts.push(asm::Instruction::RType {
instr: if is_integer(&operand_dtype) {
asm::RType::add(operand_dtype)
} else {
asm::RType::fadd(operand_dtype)
},
rd,
rs1,
rs2: Some(rs2),
}),
ast::BinaryOperator::Minus => context.insts.push(asm::Instruction::RType {
instr: if is_integer(&operand_dtype) {
asm::RType::sub(operand_dtype)
} else {
asm::RType::fsub(operand_dtype)
},
rd,
rs1,
rs2: Some(rs2),
}),
ast::BinaryOperator::ShiftLeft => {
context.insts.push(asm::Instruction::RType {
instr: asm::RType::sll(operand_dtype),
rd,
rs1,
rs2: Some(rs2),
});
}
ast::BinaryOperator::ShiftRight => {
context.insts.push(asm::Instruction::RType {
instr: if operand_dtype.is_int_signed() {
asm::RType::sra(operand_dtype)
} else {
asm::RType::srl(operand_dtype)
},
rd,
rs1,
rs2: Some(rs2),
});
}
ast::BinaryOperator::Less => context.insts.push(asm::Instruction::RType {
instr: if is_integer(&operand_dtype) {
asm::RType::Slt {
is_signed: operand_dtype.is_int_signed(),
}
} else {
asm::RType::flt(operand_dtype)
},
rd,
rs1,
rs2: Some(rs2),
}),
ast::BinaryOperator::Greater => {
context.insts.push(asm::Instruction::RType {
instr: if is_integer(&operand_dtype) {
asm::RType::Slt {
is_signed: operand_dtype.is_int_signed(),
}
} else {
asm::RType::flt(operand_dtype)
},
rd,
rs1: rs2,
rs2: Some(rs1),
});
}
ast::BinaryOperator::LessOrEqual => {
context.insts.push(asm::Instruction::RType {
instr: if is_integer(&operand_dtype) {
asm::RType::Slt {
is_signed: operand_dtype.is_int_signed(),
}
} else {
asm::RType::flt(operand_dtype)
},
rd,
rs1: rs2,
rs2: Some(rs1),
});
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Seqz { rd, rs: rd }))
}
ast::BinaryOperator::GreaterOrEqual => {
context.insts.push(asm::Instruction::RType {
instr: if is_integer(&operand_dtype) {
asm::RType::Slt {
is_signed: operand_dtype.is_int_signed(),
}
} else {
asm::RType::flt(operand_dtype)
},
rd,
rs1,
rs2: Some(rs2),
});
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Seqz { rd, rs: rd }))
}
ast::BinaryOperator::Equals => {
context.insts.push(asm::Instruction::RType {
instr: asm::RType::Xor,
rd,
rs1,
rs2: Some(rs2),
});
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Seqz { rd, rs: rd }))
}
ast::BinaryOperator::NotEquals => {
context.insts.push(asm::Instruction::RType {
instr: asm::RType::Xor,
rd,
rs1,
rs2: Some(rs2),
});
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Snez { rd, rs: rd }));
}
ast::BinaryOperator::BitwiseAnd => {
context.insts.push(asm::Instruction::RType {
instr: asm::RType::And,
rd,
rs1,
rs2: Some(rs2),
});
}
ast::BinaryOperator::BitwiseXor => {
context.insts.push(asm::Instruction::RType {
instr: asm::RType::Xor,
rd,
rs1,
rs2: Some(rs2),
});
}
ast::BinaryOperator::BitwiseOr => {
context.insts.push(asm::Instruction::RType {
instr: asm::RType::Or,
rd,
rs1,
rs2: Some(rs2),
});
}
_ => unreachable!(),
}
if is_spilled {
self.translate_store_result(&rid, dtype.clone(), rd, context);
}
}
ir::Instruction::UnaryOp { op, operand, dtype } => {
let operand_dtype = operand.dtype();
let rs1 = self.translate_load_operand(
operand,
get_lhs_register(&operand_dtype),
context,
);
let rd = rd.unwrap_or(get_res_register(dtype));
match op {
ast::UnaryOperator::Minus => context.insts.push(asm::Instruction::Pseudo(
if is_integer(&operand_dtype) {
asm::Pseudo::neg(operand_dtype, rd, rs1)
} else {
asm::Pseudo::fneg(operand_dtype, rd, rs1)
},
)),
ast::UnaryOperator::Negate => context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Seqz { rd, rs: rs1 })),
_ => unreachable!(),
}
if is_spilled {
self.translate_store_result(&rid, dtype.clone(), rd, context);
}
}
ir::Instruction::Store { ptr, value } => {
let value_dtype = value.dtype();
match ptr {
ir::Operand::Constant(ir::Constant::GlobalVariable {
name: ptr_name,
..
}) => {
                            // TODO: handle struct-typed values here
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::La {
rd: asm::Register::T0,
symbol: asm::Label(ptr_name.clone()),
}));
let rs2 = self.translate_load_operand(
value,
get_rhs_register(&value_dtype),
context,
);
context.insts.push(asm::Instruction::SType {
instr: asm::SType::store(value_dtype),
rs1: asm::Register::T0,
rs2,
imm: asm::Immediate::Value(0),
});
}
ir::Operand::Register {
rid: ptr_rid,
dtype: ptr_dtype,
} => match ptr_rid {
ir::RegisterId::Local { aid } => {
if let Some(size) = is_struct(&value_dtype, structs) {
self.translate_addi(
asm::Register::A0,
asm::Register::S0,
context.stack_offsets[ptr_rid],
&mut context.insts,
);
self.translate_addi(
asm::Register::A1,
asm::Register::S0,
context.stack_offsets[value.get_register().unwrap().0],
&mut context.insts,
);
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::A2,
imm: size,
}));
context.insts.push(asm::Instruction::Pseudo(
asm::Pseudo::Call {
offset: asm::Label(String::from("memcpy")),
},
));
} else {
let rs2 = self.translate_load_operand(
value,
get_rhs_register(&value_dtype),
context,
);
self.translate_store(
asm::SType::store(value_dtype),
asm::Register::S0,
rs2,
context.stack_offsets[ptr_rid],
&mut context.insts,
);
}
}
_ => {
if let Some(size) = is_struct(&value_dtype, structs) {
let rs1 = self.translate_load_operand(
ptr,
get_lhs_register(ptr_dtype),
context,
); // TODO: 최적화? (MV와 합칠 수 있을듯)
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
rd: asm::Register::A0,
rs: rs1,
}));
self.translate_addi(
asm::Register::A1,
asm::Register::S0,
context.stack_offsets[value.get_register().unwrap().0],
&mut context.insts,
);
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::A2,
imm: size,
}));
context.insts.push(asm::Instruction::Pseudo(
asm::Pseudo::Call {
offset: asm::Label(String::from("memcpy")),
},
));
} else {
let rs1 = self.translate_load_operand(
ptr,
get_lhs_register(ptr_dtype),
context,
);
let rs2 = self.translate_load_operand(
value,
get_rhs_register(&value_dtype),
context,
);
context.insts.push(asm::Instruction::SType {
instr: asm::SType::store(value_dtype),
rs1,
rs2,
imm: asm::Immediate::Value(0),
});
}
}
},
_ => unreachable!(),
}
}
ir::Instruction::Load { ptr } => match ptr {
ir::Operand::Constant(ir::Constant::GlobalVariable {
name: ptr_name,
dtype: value_dtype,
}) => {
                        // TODO: handle struct-typed values here
let rd = rd.unwrap_or(get_res_register(value_dtype));
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::La {
rd: asm::Register::T0,
symbol: asm::Label(ptr_name.clone()),
}));
context.insts.push(asm::Instruction::IType {
instr: asm::IType::load(value_dtype.clone()),
rd,
rs1: asm::Register::T0,
imm: asm::Immediate::Value(0),
});
if is_spilled {
self.translate_store_result(&rid, value_dtype.clone(), rd, context);
}
}
ir::Operand::Register {
rid: ptr_rid,
dtype: ptr_dtype,
} => {
let value_dtype = ptr_dtype.get_pointer_inner().unwrap();
match ptr_rid {
ir::RegisterId::Local { aid } => {
if let Some(size) = is_struct(value_dtype, structs) {
self.translate_addi(
asm::Register::A0,
asm::Register::S0,
context.stack_offsets[&rid],
&mut context.insts,
);
self.translate_addi(
asm::Register::A1,
asm::Register::S0,
context.stack_offsets[ptr_rid],
&mut context.insts,
);
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::A2,
imm: size,
}));
context.insts.push(asm::Instruction::Pseudo(
asm::Pseudo::Call {
offset: asm::Label(String::from("memcpy")),
},
));
} else {
let rd = rd.unwrap_or(get_res_register(value_dtype));
self.translate_load(
asm::IType::load(value_dtype.clone()),
rd,
asm::Register::S0,
context.stack_offsets[ptr_rid],
&mut context.insts,
);
if is_spilled {
self.translate_store_result(
&rid,
value_dtype.clone(),
rd,
context,
);
}
}
}
_ => {
if let Some(size) = is_struct(value_dtype, structs) {
let rs1 = self.translate_load_operand(
ptr,
get_lhs_register(ptr_dtype),
context,
); // TODO: 최적화? (MV와 합칠 수 있을듯)
self.translate_addi(
asm::Register::A0,
asm::Register::S0,
context.stack_offsets[&rid],
&mut context.insts,
);
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
rd: asm::Register::A1,
rs: rs1,
}));
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::A2,
imm: size,
}));
context.insts.push(asm::Instruction::Pseudo(
asm::Pseudo::Call {
offset: asm::Label(String::from("memcpy")),
},
));
} else {
let rs1 = self.translate_load_operand(
ptr,
get_lhs_register(ptr_dtype),
context,
);
let rd = rd.unwrap_or(get_res_register(value_dtype));
context.insts.push(asm::Instruction::IType {
instr: asm::IType::load(value_dtype.clone()),
rd,
rs1,
imm: asm::Immediate::Value(0),
});
if is_spilled {
self.translate_store_result(
&rid,
value_dtype.clone(),
rd,
context,
);
}
}
}
}
}
_ => unreachable!(),
},
ir::Instruction::Call {
callee,
args,
return_type,
} => {
let mut num_int_args = 0;
let mut num_float_args = 0;
let mut struct_stack_allocations = 0;
let mut struct_stack_offsets = HashMap::new();
                        // Reserve stack space for a struct return value
if let Some(size) = is_struct(return_type, structs) {
if size > 16 {
num_int_args += 1;
struct_stack_allocations += ceil_to_multiple_of_16(size);
}
}
for (i, arg) in args.iter().enumerate() {
if let Some(size) = is_struct(&arg.dtype(), structs) {
if size > 16 {
let _ = struct_stack_offsets.insert(i, struct_stack_allocations);
struct_stack_allocations += ceil_to_multiple_of_16(size);
}
}
}
if struct_stack_allocations > 0 {
self.translate_addi(
asm::Register::Sp,
asm::Register::Sp,
!struct_stack_allocations + 1,
&mut context.insts,
);
}
                        // Copy struct arguments into the reserved stack area
for (i, arg) in args.iter().enumerate() {
if let Some(size) = is_struct(&arg.dtype(), structs) {
if size > 16 {
self.translate_addi(
asm::Register::A0,
asm::Register::Sp,
struct_stack_offsets[&i],
&mut context.insts,
);
match arg {
ir::Operand::Register { rid, .. } => match rid {
ir::RegisterId::Temp { bid, iid } => {
self.translate_addi(
asm::Register::A1,
asm::Register::S0,
context.stack_offsets[rid],
&mut context.insts,
);
}
_ => todo!(),
},
_ => todo!(),
}
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::A2,
imm: size,
}));
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Call {
offset: asm::Label(String::from("memcpy")),
}));
}
}
}
                        // Pass the arguments
for (i, arg) in args.iter().enumerate() {
let dtype = arg.dtype();
if is_integer(&dtype) {
let rd = asm::Register::arg(asm::RegisterType::Integer, num_int_args);
let rs = self.translate_load_operand(arg, rd, context);
if rd != rs {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Mv { rd, rs }));
}
num_int_args += 1;
} else if is_float(&dtype) {
let rd = asm::Register::arg(
asm::RegisterType::FloatingPoint,
num_float_args,
);
let rs = self.translate_load_operand(arg, rd, context);
if rd != rs {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Fmv {
data_size: asm::DataSize::try_from(dtype.clone()).unwrap(),
rd,
rs,
}));
}
num_float_args += 1;
} else if let Some(size) = is_struct(&dtype, structs) {
if size > 16 {
self.translate_addi(
asm::Register::arg(asm::RegisterType::Integer, num_int_args),
asm::Register::Sp,
struct_stack_offsets[&i],
&mut context.insts,
);
num_int_args += 1;
} else {
match arg {
ir::Operand::Register { rid, .. } => match rid {
ir::RegisterId::Temp { bid, iid } => {
self.translate_addi(
asm::Register::T2,
asm::Register::S0,
context.stack_offsets[rid],
&mut context.insts,
);
}
_ => todo!(),
},
_ => todo!(),
}
let (struct_dtype, fields) = get_struct_dtype(&dtype, structs);
let mut is_packing = false;
let mut packing_start_offset = 0;
let mut packing_size = 0;
for field_dtype in fields {
let (offset, _) = struct_dtype
.get_offset_struct_field(
field_dtype.name().unwrap(),
structs,
)
.unwrap();
let field_size = get_dtype_size(field_dtype, structs);
if is_integer(field_dtype) {
if !is_packing {
is_packing = true;
packing_start_offset = offset;
packing_size = field_size;
} else {
                                        // TODO: this packing condition needs refinement
if offset
== packing_start_offset + (packing_size as usize)
&& packing_size + field_size <= 8
{
packing_size += field_size;
} else {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::LD,
rd: asm::Register::arg(
asm::RegisterType::Integer,
num_int_args,
),
rs1: asm::Register::T2,
imm: asm::Immediate::Value(
packing_start_offset as u64,
),
});
num_int_args += 1;
packing_start_offset = offset;
packing_size = field_size;
}
}
} else if is_float(field_dtype) {
if is_packing {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::LD,
rd: asm::Register::arg(
asm::RegisterType::Integer,
num_int_args,
),
rs1: asm::Register::T2,
imm: asm::Immediate::Value(
packing_start_offset as u64,
),
});
num_int_args += 1;
is_packing = false;
}
context.insts.push(asm::Instruction::IType {
instr: asm::IType::load(field_dtype.deref().clone()),
rd: asm::Register::arg(
asm::RegisterType::FloatingPoint,
num_float_args,
),
rs1: asm::Register::T2,
imm: asm::Immediate::Value(packing_start_offset as u64),
});
num_float_args += 1;
} else {
todo!()
}
}
if is_packing {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::LD,
rd: asm::Register::arg(
asm::RegisterType::Integer,
num_int_args,
),
rs1: asm::Register::T2,
imm: asm::Immediate::Value(packing_start_offset as u64),
});
num_int_args += 1;
}
}
} else {
todo!();
}
}
if let Some(size) = is_struct(return_type, structs) {
if size > 16 {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
rd: asm::Register::A0,
rs: asm::Register::Sp,
})); // For returned structure
}
}
match callee {
ir::Operand::Constant(callee) => {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Call {
offset: asm::Label(callee.get_global_variable_name().unwrap()),
}))
}
ir::Operand::Register { rid, .. } => {
let rs =
self.translate_load_operand(callee, asm::Register::T0, context);
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Jalr { rs }));
}
}
// TODO
if is_integer(return_type) {
if let Some(rd) = context.inference_graph.get_register(&rid) {
if rd != asm::Register::A0 {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
rd,
rs: asm::Register::A0,
}));
}
} else {
self.translate_store_result(
&rid,
return_type.clone(),
asm::Register::A0,
context,
);
}
} else if is_float(return_type) {
if let Some(rd) = context.inference_graph.get_register(&rid) {
if rd != asm::Register::FA0 {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Fmv {
data_size: asm::DataSize::try_from(return_type.clone())
.unwrap(),
rd,
rs: asm::Register::FA0,
}));
}
} else {
self.translate_store_result(
&rid,
return_type.clone(),
asm::Register::FA0,
context,
);
}
} else if let Some(size) = is_struct(return_type, structs) {
if size > 16 {
self.translate_addi(
asm::Register::A0,
asm::Register::S0,
context.stack_offsets[&rid],
&mut context.insts,
);
self.translate_addi(
asm::Register::A1,
asm::Register::Sp,
0,
&mut context.insts,
);
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::A2,
imm: size,
}));
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Call {
offset: asm::Label(String::from("memcpy")),
}));
} else {
let (struct_dtype, fields) = get_struct_dtype(return_type, structs);
let mut num_int_fields = 0;
let mut num_float_fields = 0;
for field_dtype in fields {
let (offset, _) = struct_dtype
.get_offset_struct_field(field_dtype.name().unwrap(), structs)
.unwrap();
if is_integer(field_dtype) {
self.translate_addi(
asm::Register::T2,
asm::Register::S0,
context.stack_offsets[&rid],
&mut context.insts,
);
context.insts.push(asm::Instruction::SType {
instr: asm::SType::store(field_dtype.deref().clone()),
rs1: asm::Register::T2,
rs2: asm::Register::arg(
asm::RegisterType::Integer,
num_int_fields,
),
imm: asm::Immediate::Value(0),
});
num_int_fields += 1;
} else if is_float(field_dtype) {
self.translate_addi(
asm::Register::T2,
asm::Register::S0,
context.stack_offsets[&rid],
&mut context.insts,
);
context.insts.push(asm::Instruction::SType {
instr: asm::SType::store(field_dtype.deref().clone()),
rs1: asm::Register::T2,
rs2: asm::Register::arg(
asm::RegisterType::FloatingPoint,
num_float_fields,
),
imm: asm::Immediate::Value(0),
});
num_float_fields += 1;
} else {
todo!()
}
}
}
}
if struct_stack_allocations > 0 {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::ADDI,
rd: asm::Register::Sp,
rs1: asm::Register::Sp,
imm: asm::Immediate::Value(struct_stack_allocations),
});
}
}
ir::Instruction::TypeCast {
value,
target_dtype,
} => {
let value_dtype = value.dtype();
let rs1 =
self.translate_load_operand(value, get_lhs_register(&value_dtype), context);
let rd = rd.unwrap_or(get_res_register(target_dtype));
match (&value_dtype, target_dtype) {
(
ir::Dtype::Int {
width, is_signed, ..
},
ir::Dtype::Int {
width: target_width,
is_signed: target_is_signed,
..
},
) => {
if target_width <= width {
let rs2 = get_rhs_register(&value_dtype);
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: rs2,
imm: match target_width {
1 => 1,
8 => 0xFF,
16 => 0xFFFF,
32 => 0xFFFFFFFF,
64 => 0xFFFFFFFFFFFFFFFF,
_ => unreachable!(),
},
}));
context.insts.push(asm::Instruction::RType {
instr: asm::RType::And,
rd,
rs1,
rs2: Some(rs2),
});
if *target_is_signed {
match target_width {
8 => {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::slli(ir::Dtype::LONGLONG),
rd,
rs1: rd,
imm: asm::Immediate::Value(56),
});
context.insts.push(asm::Instruction::IType {
instr: asm::IType::srai(ir::Dtype::LONGLONG),
rd,
rs1: rd,
imm: asm::Immediate::Value(56),
});
}
16 => {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::slli(ir::Dtype::LONGLONG),
rd,
rs1: rd,
imm: asm::Immediate::Value(48),
});
context.insts.push(asm::Instruction::IType {
instr: asm::IType::srai(ir::Dtype::LONGLONG),
rd,
rs1: rd,
imm: asm::Immediate::Value(48),
});
}
32 => context.insts.push(asm::Instruction::Pseudo(
asm::Pseudo::SextW { rd, rs: rd },
)),
_ => todo!(),
}
}
} else if is_signed == target_is_signed {
if rd != rs1 {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
rd,
rs: rs1,
}));
}
} else if *target_is_signed {
let rs2 = get_rhs_register(&value_dtype);
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: rs2,
imm: match target_width {
1 => 1,
8 => 0xFF,
16 => 0xFFFF,
32 => 0xFFFFFFFF,
64 => 0xFFFFFFFFFFFFFFFF,
_ => unreachable!(),
},
}));
context.insts.push(asm::Instruction::RType {
instr: asm::RType::And,
rd,
rs1,
rs2: Some(rs2),
});
} else {
let rs2 = get_rhs_register(&value_dtype);
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: rs2,
imm: match width {
1 => 1,
8 => 0xFF,
16 => 0xFFFF,
32 => 0xFFFFFFFF,
64 => 0xFFFFFFFFFFFFFFFF,
_ => unreachable!(),
},
}));
context.insts.push(asm::Instruction::RType {
instr: asm::RType::And,
rd,
rs1,
rs2: Some(rs2),
});
}
if is_spilled {
self.translate_store_result(
&rid,
target_dtype.clone(),
rd,
context,
);
}
}
(ir::Dtype::Int { .. }, ir::Dtype::Pointer { .. }) => {
if is_spilled {
self.translate_store_result(
&rid,
target_dtype.clone(),
rs1,
context,
);
} else if rd != rs1 {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
rd,
rs: rs1,
}));
}
}
(ir::Dtype::Int { .. }, ir::Dtype::Float { .. }) => {
context.insts.push(asm::Instruction::RType {
instr: asm::RType::fcvt_int_to_float(
value.dtype(),
target_dtype.clone(),
),
rd,
rs1,
rs2: None,
});
if is_spilled {
self.translate_store_result(
&rid,
target_dtype.clone(),
rd,
context,
);
}
}
(ir::Dtype::Float { .. }, ir::Dtype::Float { .. }) => {
context.insts.push(asm::Instruction::RType {
instr: asm::RType::FcvtFloatToFloat {
from: asm::DataSize::try_from(value.dtype()).unwrap(),
to: asm::DataSize::try_from(target_dtype.clone()).unwrap(),
},
rd,
rs1,
rs2: None,
});
if is_spilled {
self.translate_store_result(
&rid,
target_dtype.clone(),
rd,
context,
);
}
}
(ir::Dtype::Float { .. }, ir::Dtype::Int { .. }) => {
context.insts.push(asm::Instruction::RType {
instr: asm::RType::fcvt_float_to_int(
value.dtype(),
target_dtype.clone(),
),
rd,
rs1,
rs2: None,
});
if is_spilled {
self.translate_store_result(
&rid,
target_dtype.clone(),
rd,
context,
);
}
}
_ => unreachable!(),
}
}
ir::Instruction::GetElementPtr { ptr, offset, .. } => {
let ptr_dtype = ptr.dtype();
let offset_dtype = offset.dtype();
let rd = rd.unwrap_or(get_res_register(&ptr_dtype));
match ptr {
ir::Operand::Constant(ir::Constant::GlobalVariable {
name: ptr_name,
..
}) => {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::La {
rd: asm::Register::T0,
symbol: asm::Label(ptr_name.clone()),
}));
let mut imm_mode = false;
if let Some(ir::Constant::Int { value, .. }) = offset.get_constant() {
if (-2048..=2047).contains(&(*value as i128)) {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::ADDI,
rd,
rs1: asm::Register::T0,
imm: asm::Immediate::Value(*value as u64),
});
imm_mode = true;
}
}
if !imm_mode {
let rs2 = self.translate_load_operand(
offset,
get_rhs_register(&offset_dtype),
context,
);
context.insts.push(asm::Instruction::RType {
instr: asm::RType::add(ptr_dtype.clone()),
rd,
rs1: asm::Register::T0,
rs2: Some(rs2),
});
}
if is_spilled {
self.translate_store_result(&rid, ptr_dtype, rd, context);
}
}
ir::Operand::Register { rid: ptr_rid, .. } => match ptr_rid {
ir::RegisterId::Local { aid } => {
if let Some(ir::Constant::Int { value, .. }) = offset.get_constant()
{
self.translate_addi(
rd,
asm::Register::S0,
context.stack_offsets[ptr_rid] + (*value as u64),
&mut context.insts,
);
} else {
let rs2 = self.translate_load_operand(
offset,
get_rhs_register(&offset_dtype),
context,
);
self.translate_addi(
rd,
asm::Register::S0,
context.stack_offsets[ptr_rid],
&mut context.insts,
);
context.insts.push(asm::Instruction::RType {
instr: asm::RType::add(ptr_dtype.clone()),
rd,
rs1: rd,
rs2: Some(rs2),
});
}
if is_spilled {
self.translate_store_result(&rid, ptr_dtype, rd, context);
}
}
_ => {
let rs1 = self.translate_load_operand(
ptr,
get_lhs_register(&ptr_dtype),
context,
);
let mut imm_mode = false;
if let Some(ir::Constant::Int { value, .. }) = offset.get_constant()
{
if (-2048..=2047).contains(&(*value as i128)) {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::ADDI,
rd,
rs1,
imm: asm::Immediate::Value(*value as u64),
});
imm_mode = true;
}
}
if !imm_mode {
let rs2 = self.translate_load_operand(
offset,
get_rhs_register(&offset_dtype),
context,
);
context.insts.push(asm::Instruction::RType {
instr: asm::RType::add(ptr_dtype.clone()),
rd,
rs1,
rs2: Some(rs2),
});
}
if is_spilled {
self.translate_store_result(&rid, ptr_dtype, rd, context);
}
}
},
_ => unreachable!(),
}
}
_ => todo!(),
}
}
match &block.exit {
ir::BlockExit::Jump { arg } => context
.insts
.append(&mut self.translate_phinode(name, arg, context, structs)),
ir::BlockExit::ConditionalJump {
condition,
arg_then,
arg_else,
} => {
let rs1 = get_lhs_register(&condition.dtype());
let rs1_real = self.translate_load_operand(condition, rs1, context);
let arg_then_label = self.translate_label(name, arg_then, context, structs);
let arg_else_label = self.translate_label(name, arg_else, context, structs);
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Seqz {
rd: rs1,
rs: rs1_real,
}));
context.insts.push(asm::Instruction::BType {
instr: asm::BType::Bne,
rs1,
rs2: asm::Register::Zero,
imm: arg_else_label,
});
context.insts.push(asm::Instruction::Pseudo(asm::Pseudo::J {
offset: arg_then_label,
})); // TODO: 최적화 가능? (점프 삭제)
}
ir::BlockExit::Switch {
value,
default,
cases,
} => {
let rs1 =
self.translate_load_operand(value, get_lhs_register(&value.dtype()), context);
let rs2 = get_rhs_register(&value.dtype());
let default_label = self.translate_label(name, default, context, structs);
for (case, arg) in cases {
let rs2 = self.translate_load_operand(
&ir::Operand::constant(case.clone()),
rs2,
context,
);
let case_label = self.translate_label(name, arg, context, structs);
context.insts.push(asm::Instruction::BType {
instr: asm::BType::Beq,
rs1,
rs2,
imm: case_label,
});
}
context.insts.push(asm::Instruction::Pseudo(asm::Pseudo::J {
offset: default_label,
})); // TODO: 최적화 가능? (점프 삭제)
}
ir::BlockExit::Return { value } => {
let dtype = value.dtype();
if is_integer(&dtype) {
let rd = self.translate_load_operand(value, asm::Register::A0, context);
if rd != asm::Register::A0 {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
rd: asm::Register::A0,
rs: rd,
}));
}
} else if is_float(&dtype) {
let rd = self.translate_load_operand(value, asm::Register::FA0, context);
if rd != asm::Register::FA0 {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Fmv {
data_size: asm::DataSize::try_from(dtype).unwrap(),
rd: asm::Register::FA0,
rs: rd,
}));
}
} else if let Some(size) = is_struct(&dtype, structs) {
if size > 16 {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::load(ir::Dtype::pointer(ir::Dtype::unit())), // TODO: ok?
rd: asm::Register::A0,
rs1: asm::Register::S0,
imm: asm::Immediate::Value(!24 + 1),
});
match value {
ir::Operand::Constant(constant) => match constant {
ir::Constant::Undef { .. } => (), // Do nothing
_ => todo!(),
},
ir::Operand::Register { rid, .. } => match rid {
ir::RegisterId::Temp { bid, iid } => self.translate_addi(
asm::Register::A1,
asm::Register::S0,
context.stack_offsets[rid],
&mut context.insts,
),
_ => todo!(),
},
_ => unreachable!(),
}
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::A2,
imm: size,
}));
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Call {
offset: asm::Label(String::from("memcpy")),
}));
} else {
match value {
ir::Operand::Constant(constant) => match constant {
ir::Constant::Undef { .. } => (), // Do nothing
_ => todo!(),
},
ir::Operand::Register { rid, .. } => match rid {
ir::RegisterId::Temp { bid, iid } => self.translate_addi(
asm::Register::T2,
asm::Register::S0,
context.stack_offsets[rid],
&mut context.insts,
),
_ => todo!(),
},
_ => unreachable!(),
}
let (struct_dtype, fields) = get_struct_dtype(&dtype, structs);
let mut num_int_fields = 0;
let mut num_float_fields = 0;
for field_dtype in fields {
let (offset, _) = struct_dtype
.get_offset_struct_field(field_dtype.name().unwrap(), structs)
.unwrap();
if is_integer(field_dtype) {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::load(field_dtype.deref().clone()),
rd: asm::Register::arg(
asm::RegisterType::Integer,
num_int_fields,
),
rs1: asm::Register::T2,
imm: asm::Immediate::Value(offset as u64),
});
num_int_fields += 1;
} else if is_float(field_dtype) {
context.insts.push(asm::Instruction::IType {
instr: asm::IType::load(field_dtype.deref().clone()),
rd: asm::Register::arg(
asm::RegisterType::FloatingPoint,
num_float_fields,
),
rs1: asm::Register::T2,
imm: asm::Immediate::Value(offset as u64),
});
num_float_fields += 1;
} else {
todo!()
}
}
}
}
self.translate_epilogue(context);
}
_ => unreachable!(),
}
}
fn translate_label(
&mut self,
name: &String,
arg: &ir::JumpArg,
context: &mut Context,
structs: &HashMap<String, Option<ir::Dtype>>,
) -> asm::Label {
if arg.args.is_empty() {
asm::Label::new(name, arg.bid)
} else {
let phinode_insts = self.translate_phinode(name, arg, context, structs);
let label = asm::Label(format!("{name}_P{}", self.phinode_counter));
self.phinode_counter += 1;
context.new_blocks.push((label.clone(), phinode_insts));
label
}
}
fn translate_load_operand(
&mut self,
operand: &ir::Operand,
rd: asm::Register,
context: &mut Context,
) -> asm::Register {
match operand {
ir::Operand::Constant(constant) => {
match constant {
ir::Constant::Undef { .. } => (), // Do nothing
ir::Constant::Int { value: 0, .. } => return asm::Register::Zero,
&ir::Constant::Int { value, .. } => {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd,
imm: value as u64,
}));
}
&ir::Constant::Float { value, width } => {
match width {
32 => context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::T2,
imm: (*value as f32).to_bits() as u64,
})),
64 => context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::T2,
imm: value.to_bits(),
})),
_ => unreachable!(),
}
context.insts.push(asm::Instruction::RType {
instr: asm::RType::fmv_int_to_float(ir::Dtype::float(width)),
rd,
rs1: asm::Register::T2,
rs2: None,
});
}
ir::Constant::GlobalVariable { name, .. } => {
context
.insts
.push(asm::Instruction::Pseudo(asm::Pseudo::La {
rd,
symbol: asm::Label(name.clone()),
}));
}
_ => unreachable!(),
}
rd
}
ir::Operand::Register { rid, dtype } => {
if matches!(rid, ir::RegisterId::Local { .. }) {
self.translate_addi(
rd,
asm::Register::S0,
context.stack_offsets[rid],
&mut context.insts,
);
rd
} else if let Some(asm_register) = context.inference_graph.get_register(rid) {
asm_register
} else {
self.translate_load(
asm::IType::load(dtype.clone()),
rd,
asm::Register::S0,
context.stack_offsets[rid],
&mut context.insts,
);
rd
}
}
}
}
fn translate_store_result(
&mut self,
rid: &ir::RegisterId,
dtype: ir::Dtype,
rs: asm::Register,
context: &mut Context,
) {
self.translate_store(
asm::SType::store(dtype),
asm::Register::S0,
rs,
context.stack_offsets[rid],
&mut context.insts,
);
}
fn translate_phinode(
&mut self,
name: &str,
arg: &ir::JumpArg,
context: &Context,
structs: &HashMap<String, Option<ir::Dtype>>,
) -> Vec<asm::Instruction> {
let bid = arg.bid;
let mut lives = context
.inference_graph
.lives
.get(&ir::RegisterId::temp(bid, 0))
.cloned()
.unwrap_or_default();
let mut referenced_org_phinodes = HashSet::new();
for arg in &arg.args {
if let ir::Operand::Register {
rid: ir::RegisterId::Arg { bid: arg_bid, aid },
..
} = arg
{
if *arg_bid == bid {
let _ = referenced_org_phinodes.insert(aid);
}
}
if let ir::Operand::Register { rid, .. } = arg {
let _ = lives.insert(*rid);
}
}
let mut used_asm_registers = lives
.iter()
.filter_map(|rid| context.inference_graph.get_register(rid))
.collect::<HashSet<_>>();
let mut phinode_temp_registers = HashMap::new();
let mut phinode_stack_offsets = HashMap::new();
let mut phinode_stack_allocations = 0;
for (aid, arg) in arg.args.iter().enumerate() {
if referenced_org_phinodes.contains(&aid) {
let dtype = arg.dtype();
                // If a register is free, back up the old phinode value in it
if is_integer(&dtype) {
let used_asm_a_registers = used_asm_registers
.iter()
.filter_map(|asm_reg| {
if let asm::Register::Arg(asm::RegisterType::Integer, i) = asm_reg {
Some(*i)
} else {
None
}
})
.collect();
let smallest_a = smallest_missing_integer(&used_asm_a_registers, 0);
if smallest_a <= 7 {
let asm_reg = asm::Register::arg(asm::RegisterType::Integer, smallest_a);
let _ = phinode_temp_registers.insert(aid, asm_reg);
let _ = used_asm_registers.insert(asm_reg);
continue;
}
} else if is_float(&dtype) {
let used_asm_a_registers = used_asm_registers
.iter()
.filter_map(|asm_reg| {
if let asm::Register::Arg(asm::RegisterType::FloatingPoint, i) = asm_reg
{
Some(*i)
} else {
None
}
})
.collect();
let smallest_a = smallest_missing_integer(&used_asm_a_registers, 0);
if smallest_a <= 7 {
let asm_reg =
asm::Register::arg(asm::RegisterType::FloatingPoint, smallest_a);
let _ = phinode_temp_registers.insert(aid, asm_reg);
let _ = used_asm_registers.insert(asm_reg);
continue;
}
} else {
todo!()
}
                // Otherwise spill it to the stack
let _ = phinode_stack_offsets.insert(aid, phinode_stack_allocations);
phinode_stack_allocations +=
ceil_to_multiple_of_16(get_dtype_size(&arg.dtype(), structs));
}
}
let mut phinode_insts = Vec::new();
if phinode_stack_allocations > 0 {
self.translate_addi(
asm::Register::Sp,
asm::Register::Sp,
!phinode_stack_allocations + 1,
&mut phinode_insts,
);
}
        // To avoid the phinode swap problem, save the old phinode values
        // (only the ones that are actually needed) to the stack or registers first.
let bid = arg.bid;
for (aid, arg) in arg.args.iter().enumerate() {
if referenced_org_phinodes.contains(&aid) {
let rid = ir::RegisterId::arg(bid, aid);
let arg_dtype = arg.dtype();
let rs1: asm::Register =
if let Some(rs1) = context.inference_graph.get_register(&rid) {
rs1
} else {
let rs1 = get_lhs_register(&arg_dtype);
self.translate_load(
asm::IType::load(arg_dtype.clone()),
rs1,
asm::Register::S0,
context.stack_offsets[&rid],
&mut phinode_insts,
);
rs1
};
if let Some(asm_reg) = phinode_temp_registers.get(&aid) {
if is_integer(&arg_dtype) {
phinode_insts.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
rd: *asm_reg,
rs: rs1,
}));
} else if is_float(&arg_dtype) {
phinode_insts.push(asm::Instruction::Pseudo(asm::Pseudo::Fmv {
data_size: asm::DataSize::try_from(arg_dtype).unwrap(),
rd: *asm_reg,
rs: rs1,
}));
} else {
unreachable!()
}
} else {
self.translate_store(
asm::SType::store(arg_dtype),
asm::Register::Sp,
rs1,
phinode_stack_offsets[&aid],
&mut phinode_insts,
);
}
}
}
for (aid, arg) in arg.args.iter().enumerate() {
let arg_dtype = arg.dtype();
let rid = ir::RegisterId::arg(bid, aid);
let rd = context.inference_graph.get_register(&rid);
let is_spilled = rd.is_none();
let rd = rd.unwrap_or(get_lhs_register(&arg_dtype));
match arg {
ir::Operand::Constant(constant) => match constant {
ir::Constant::Undef { .. } => continue, // Do nothing
ir::Constant::Int { value: 0, .. } => {
if is_spilled {
self.translate_store(
asm::SType::store(arg.dtype()),
asm::Register::S0,
asm::Register::Zero,
context.stack_offsets[&ir::RegisterId::arg(bid, aid)],
&mut phinode_insts,
);
continue;
} else {
phinode_insts.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
rd,
rs: asm::Register::Zero,
}));
}
}
ir::Constant::Int {
value,
width,
is_signed,
} => phinode_insts.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd,
imm: *value as u64,
})),
_ => todo!(),
},
ir::Operand::Register { rid: arg_rid, .. } => {
if let ir::RegisterId::Arg {
bid: arg_bid,
aid: arg_aid,
} = arg_rid
{
if *arg_bid == bid {
if let Some(asm_reg) = phinode_temp_registers.get(arg_aid) {
if is_integer(&arg_dtype) {
phinode_insts.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
rd,
rs: *asm_reg,
}));
} else if is_float(&arg_dtype) {
phinode_insts.push(asm::Instruction::Pseudo(
asm::Pseudo::Fmv {
data_size: asm::DataSize::try_from(arg_dtype).unwrap(),
rd,
rs: *asm_reg,
},
));
} else {
unreachable!()
}
} else {
self.translate_load(
asm::IType::load(arg_dtype),
rd,
asm::Register::Sp,
phinode_stack_offsets[arg_aid],
&mut phinode_insts,
);
}
} else if let Some(rs1) = context.inference_graph.get_register(arg_rid) {
if is_integer(&arg_dtype) {
phinode_insts.push(asm::Instruction::Pseudo(asm::Pseudo::Mv {
rd,
rs: rs1,
}));
} else if is_float(&arg_dtype) {
phinode_insts.push(asm::Instruction::Pseudo(asm::Pseudo::Fmv {
data_size: asm::DataSize::try_from(arg_dtype).unwrap(),
rd,
rs: rs1,
}));
} else {
todo!()
}
} else {
self.translate_load(
asm::IType::load(arg_dtype),
rd,
asm::Register::S0,
context.stack_offsets[arg_rid],
&mut phinode_insts,
);
}
} else if let Some(rs1) = context.inference_graph.get_register(arg_rid) {
if is_integer(&arg_dtype) {
phinode_insts
.push(asm::Instruction::Pseudo(asm::Pseudo::Mv { rd, rs: rs1 }));
} else if is_float(&arg_dtype) {
phinode_insts.push(asm::Instruction::Pseudo(asm::Pseudo::Fmv {
data_size: asm::DataSize::try_from(arg_dtype).unwrap(),
rd,
rs: rs1,
}));
} else {
todo!()
}
} else {
self.translate_load(
asm::IType::load(arg_dtype),
rd,
asm::Register::S0,
context.stack_offsets[arg_rid],
&mut phinode_insts,
);
}
}
}
if is_spilled {
self.translate_store(
asm::SType::store(arg.dtype()),
asm::Register::S0,
rd,
context.stack_offsets[&ir::RegisterId::arg(bid, aid)],
&mut phinode_insts,
);
}
}
if phinode_stack_allocations > 0 {
self.translate_addi(
asm::Register::Sp,
asm::Register::Sp,
phinode_stack_allocations,
&mut phinode_insts,
);
}
phinode_insts.push(asm::Instruction::Pseudo(asm::Pseudo::J {
offset: asm::Label::new(name, bid),
}));
phinode_insts
}
fn translate_addi(
&mut self,
rd: asm::Register,
rs1: asm::Register,
imm: u64,
insts: &mut Vec<asm::Instruction>,
) {
let imm_signed = imm as i64;
if (-2048..=2047).contains(&imm_signed) {
insts.push(asm::Instruction::IType {
instr: asm::IType::ADDI,
rd,
rs1,
imm: asm::Immediate::Value(imm),
});
} else {
insts.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::T2,
imm,
}));
insts.push(asm::Instruction::RType {
instr: asm::RType::add(ir::Dtype::int(64)),
rd,
rs1,
rs2: Some(asm::Register::T2),
});
}
}
fn translate_store(
&mut self,
instr: asm::SType,
rs1: asm::Register,
rs2: asm::Register,
imm: u64,
insts: &mut Vec<asm::Instruction>,
) {
let imm_signed = imm as i64;
if (-2048..=2047).contains(&imm_signed) {
insts.push(asm::Instruction::SType {
instr,
rs1,
rs2,
imm: asm::Immediate::Value(imm),
});
} else {
insts.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::T2,
imm,
}));
insts.push(asm::Instruction::RType {
instr: asm::RType::add(ir::Dtype::int(64)),
rd: asm::Register::T2,
rs1,
rs2: Some(asm::Register::T2),
});
insts.push(asm::Instruction::SType {
instr,
rs1: asm::Register::T2,
rs2,
imm: asm::Immediate::Value(0),
});
}
}
fn translate_load(
&mut self,
instr: asm::IType,
rd: asm::Register,
rs1: asm::Register,
imm: u64,
insts: &mut Vec<asm::Instruction>,
) {
let imm_signed = imm as i64;
if (-2048..=2047).contains(&imm_signed) {
insts.push(asm::Instruction::IType {
instr,
rd,
rs1,
imm: asm::Immediate::Value(imm),
});
} else {
insts.push(asm::Instruction::Pseudo(asm::Pseudo::Li {
rd: asm::Register::T2,
imm,
}));
insts.push(asm::Instruction::RType {
instr: asm::RType::add(ir::Dtype::int(64)),
rd: asm::Register::T2,
rs1,
rs2: Some(asm::Register::T2),
});
insts.push(asm::Instruction::IType {
instr,
rd,
rs1: asm::Register::T2,
imm: asm::Immediate::Value(0),
});
}
}
}
fn get_lhs_register(dtype: &ir::Dtype) -> asm::Register {
match dtype {
ir::Dtype::Int { .. } | ir::Dtype::Pointer { .. } => asm::Register::T0,
ir::Dtype::Float { .. } => asm::Register::FT0,
_ => todo!(),
}
}
fn get_rhs_register(dtype: &ir::Dtype) -> asm::Register {
match dtype {
ir::Dtype::Int { .. } | ir::Dtype::Pointer { .. } => asm::Register::T1,
ir::Dtype::Float { .. } => asm::Register::FT1,
_ => todo!(),
}
}
fn get_res_register(dtype: &ir::Dtype) -> asm::Register {
match dtype {
ir::Dtype::Int { .. } | ir::Dtype::Pointer { .. } => asm::Register::T1,
ir::Dtype::Float { .. } => asm::Register::FT1,
_ => todo!(),
}
}
/// Rounds `x` up to the next multiple of 16 (stack allocations are kept
/// 16-byte aligned).
fn ceil_to_multiple_of_16(x: u64) -> u64 {
    let remainder = x % 16;
    if remainder == 0 { x } else { x + (16 - remainder) }
}
/// Size of `dtype` in bytes, resolving named struct types through `structs`.
fn get_dtype_size(dtype: &ir::Dtype, structs: &HashMap<String, Option<ir::Dtype>>) -> u64 {
    dtype.size_align_of(structs).unwrap().0 as u64
}
/// Promotes sub-word integer types (width < 32) to 32-bit, preserving
/// signedness and const-ness; all other types pass through unchanged.
fn upgrade_dtype(dtype: &ir::Dtype) -> ir::Dtype {
    if let ir::Dtype::Int {
        width,
        is_signed,
        is_const,
    } = dtype
    {
        if *width < 32 {
            return ir::Dtype::Int {
                width: 32,
                is_signed: *is_signed,
                is_const: *is_const,
            };
        }
    }
    dtype.clone()
}
/// Lowers an integer or float constant into the assembler data directive of
/// the matching width (byte/half/word/quad); floats are emitted as their raw
/// bit patterns.
fn constant_to_directive(constant: ir::Constant) -> asm::Directive {
    match constant {
        ir::Constant::Int { value, width: 8, .. } => asm::Directive::Byte(value as u8),
        ir::Constant::Int { value, width: 16, .. } => asm::Directive::Half(value as u16),
        ir::Constant::Int { value, width: 32, .. } => asm::Directive::Word(value as u32),
        ir::Constant::Int { value, width: 64, .. } => asm::Directive::Quad(value as u64),
        ir::Constant::Float { value, width: 32 } => asm::Directive::Word((*value as f32).to_bits()),
        ir::Constant::Float { value, width: 64 } => asm::Directive::Quad(value.to_bits()),
        // Any other width is impossible for these constants.
        ir::Constant::Int { .. } | ir::Constant::Float { .. } => unreachable!(),
        // Non-numeric constants are not supported yet.
        _ => todo!(),
    }
}
/// Evaluates a constant AST expression (a literal, optionally under a unary
/// minus) into an IR constant.
///
/// # Panics
/// Panics on any expression form other than a constant or a unary minus
/// applied to a constant.
fn expression_to_constant(expression: &ast::Expression) -> ir::Constant {
    match expression {
        ast::Expression::Constant(constant) => ir::Constant::try_from(&constant.node).unwrap(),
        ast::Expression::UnaryOperator(unary_op) => {
            let operand = expression_to_constant(&unary_op.node.operand.node);
            match unary_op.node.operator.node {
                ast::UnaryOperator::Minus => match operand {
                    ir::Constant::Int {
                        value,
                        width,
                        is_signed,
                    } => ir::Constant::Int {
                        // Two's-complement negation. `wrapping_add` avoids the
                        // debug-mode overflow panic that `!value + 1` hits when
                        // `value == 0` (then `!value` is the type's max value).
                        value: (!value).wrapping_add(1),
                        width,
                        is_signed,
                    },
                    ir::Constant::Float { value, width } => ir::Constant::Float {
                        value: -value,
                        width,
                    },
                    _ => panic!(),
                },
                _ => panic!(),
            }
        }
        _ => panic!(),
    }
}
/// Flattens an initializer (a single constant expression or a possibly nested
/// brace list) into the sequence of data directives it contributes, in source
/// order.
fn initializer_to_directives(initializer: &ast::Initializer) -> Vec<asm::Directive> {
    let mut directives = Vec::new();
    match initializer {
        ast::Initializer::Expression(expression) => {
            directives.push(constant_to_directive(expression_to_constant(
                &expression.node,
            )));
        }
        ast::Initializer::List(list) => {
            // Recurse into each list item and splice its directives in order.
            for item in list {
                directives.extend(initializer_to_directives(&item.node.initializer.node));
            }
        }
    }
    directives
}
/// Extracts the single scalar constant from an expression initializer
/// (a literal, or a unary minus applied to a literal).
///
/// NOTE(review): this largely duplicates `expression_to_constant`, except it
/// rejects nested unary operators and list initializers — consider merging.
///
/// # Panics
/// Panics on list initializers and on any expression other than a constant or
/// a negated constant.
fn get_constant_from_initializer(initializer: &ast::Initializer) -> ir::Constant {
    match initializer {
        ast::Initializer::Expression(expression) => match &expression.node {
            ast::Expression::Constant(constant) => ir::Constant::try_from(&constant.node).unwrap(),
            ast::Expression::UnaryOperator(op) => {
                let operand = match &op.node.operand.node {
                    ast::Expression::Constant(constant) => {
                        ir::Constant::try_from(&constant.node).unwrap()
                    }
                    _ => panic!(),
                };
                match op.node.operator.node {
                    ast::UnaryOperator::Minus => match operand {
                        ir::Constant::Int {
                            value,
                            width,
                            is_signed,
                        } => ir::Constant::Int {
                            // Two's-complement negation. `wrapping_add` avoids
                            // the debug-mode overflow panic that `!value + 1`
                            // hits when `value == 0`.
                            value: (!value).wrapping_add(1),
                            width,
                            is_signed,
                        },
                        ir::Constant::Float { value, width } => ir::Constant::Float {
                            value: -value,
                            width,
                        },
                        _ => panic!(),
                    },
                    _ => panic!(),
                }
            }
            _ => panic!(),
        },
        _ => panic!(),
    }
}
/// True for types held in integer registers: plain integers and pointers.
fn is_integer(dtype: &ir::Dtype) -> bool {
    matches!(dtype, ir::Dtype::Int { .. }) || matches!(dtype, ir::Dtype::Pointer { .. })
}
/// True for floating-point types (held in float registers).
fn is_float(dtype: &ir::Dtype) -> bool {
    match dtype {
        ir::Dtype::Float { .. } => true,
        _ => false,
    }
}
/// If `dtype` is a struct type, returns its size in bytes; otherwise `None`.
fn is_struct(dtype: &ir::Dtype, structs: &HashMap<String, Option<ir::Dtype>>) -> Option<u64> {
    match dtype {
        ir::Dtype::Struct { .. } => {
            let (size, _align) = dtype.size_align_of(structs).unwrap();
            Some(size as u64)
        }
        _ => None,
    }
}
/// Resolves a (possibly named) struct type through the `structs` table,
/// returning the fully-resolved struct dtype together with its field list.
///
/// # Panics
/// Panics if `dtype` is not a named struct or the name is not resolved in
/// `structs`.
fn get_struct_dtype<'a>(
    dtype: &'a ir::Dtype,
    structs: &'a HashMap<String, Option<ir::Dtype>>,
) -> (&'a ir::Dtype, &'a Vec<ir::Named<ir::Dtype>>) {
    let name = dtype.get_struct_name().unwrap().as_ref().unwrap();
    let resolved = structs[name].as_ref().unwrap();
    let members = resolved.get_struct_fields().unwrap().as_ref().unwrap();
    (resolved, members)
}
/// Counts how many integer and floating-point argument registers a call with
/// this signature occupies, and records, for each primitive parameter, the
/// register index it lands in within its class.
///
/// Returns `(num_int_args, num_float_args, primitive_arg_reg_index)`, where
/// the map goes from parameter position to its index among registers of its
/// class. The layout appears to follow the RISC-V calling convention
/// (a0..a7 / fa0..fa7): small structs (<= 16 bytes) are flattened
/// field-by-field with adjacent integer fields packed into 8-byte slots, and
/// larger structs are passed by reference — TODO confirm against the ABI used
/// elsewhere in this backend.
fn get_number_of_register_arguments(
    return_type: &ir::Dtype,
    params: &[ir::Dtype],
    structs: &HashMap<String, Option<ir::Dtype>>,
) -> (i32, i32, HashMap<usize, i32>) {
    let mut num_int_args = 0;
    let mut num_float_args = 0;
    let mut primitive_arg_reg_index = HashMap::new();
    // A struct return larger than 16 bytes consumes one integer register
    // (presumably the hidden return-buffer pointer).
    if is_struct(return_type, structs).is_some_and(|size| size > 16) {
        num_int_args += 1;
    }
    for (i, dtype) in params.iter().enumerate() {
        if is_integer(dtype) {
            // Primitive parameter: record which register of its class it uses.
            let _ = primitive_arg_reg_index.insert(i, num_int_args);
            num_int_args += 1;
        } else if is_float(dtype) {
            let _ = primitive_arg_reg_index.insert(i, num_float_args);
            num_float_args += 1;
        } else if let Some(size) = is_struct(dtype, structs) {
            // NOTE(review): parameters that are neither primitive nor struct
            // (e.g. arrays) fall through and are counted as nothing — confirm
            // such parameters cannot occur here.
            if size > 16 {
                // Large struct: passed by reference, one integer register.
                num_int_args += 1;
            } else {
                // Small struct: flattened. Float fields each take a float
                // register; runs of adjacent integer fields are packed into
                // shared 8-byte integer register slots.
                let (struct_dtype, fields) = get_struct_dtype(dtype, structs);
                // `is_packing` is true while an integer slot is being
                // accumulated but has not been counted yet.
                let mut is_packing = false;
                let mut packing_start_offset = 0;
                let mut packing_size = 0;
                for field_dtype in fields {
                    let (offset, _) = struct_dtype
                        .get_offset_struct_field(field_dtype.name().unwrap(), structs)
                        .unwrap();
                    let field_size = get_dtype_size(field_dtype, structs);
                    if is_integer(field_dtype) {
                        if !is_packing {
                            // Open a new integer slot at this field.
                            is_packing = true;
                            packing_start_offset = offset;
                            packing_size = field_size;
                        } else if offset == packing_start_offset + (packing_size as usize)
                            && packing_size + field_size <= 8
                        {
                            // Field is contiguous with the current slot and
                            // still fits in 8 bytes: extend the slot.
                            packing_size += field_size;
                        } else {
                            // Gap or overflow: count the previous slot and
                            // start a new one here.
                            num_int_args += 1;
                            packing_start_offset = offset;
                            packing_size = field_size;
                        }
                    } else if is_float(field_dtype) {
                        if is_packing {
                            // A float field terminates any pending integer slot.
                            num_int_args += 1;
                            is_packing = false;
                        }
                        num_float_args += 1;
                    } else {
                        // Nested aggregates inside a small struct: unsupported.
                        todo!()
                    }
                }
                if is_packing {
                    // Count the slot still open at the end of the field list.
                    num_int_args += 1;
                }
            }
        }
    }
    // Ensure at least one register of the return value's class is accounted
    // for even when no argument of that class exists. The `else if` is
    // equivalent to two independent `if`s here, since a type cannot be both
    // integer and float.
    if num_int_args == 0 && is_integer(return_type) {
        num_int_args = 1;
    } else if num_float_args == 0 && is_float(return_type) {
        num_float_args = 1;
    }
    (num_int_args, num_float_args, primitive_arg_reg_index)
}