From 3db631716f9e4ecaac0e6b694309ae42e73f8b93 Mon Sep 17 00:00:00 2001
From: Slava Pestov
Date: Mon, 14 Sep 2009 03:14:48 -0500
Subject: [PATCH] vm: More indentation fixes

---
 vm/cpu-ppc.S    |  73 ++++++++++++++++----------------
 vm/cpu-x86.32.S |  28 ++++++------
 vm/cpu-x86.64.S |  20 ++++-----
 vm/cpu-x86.S    | 110 ++++++++++++++++++++++++------------------------
 4 files changed, 115 insertions(+), 116 deletions(-)

diff --git a/vm/cpu-ppc.S b/vm/cpu-ppc.S
index 342ec83d7e..e7a210b7aa 100644
--- a/vm/cpu-ppc.S
+++ b/vm/cpu-ppc.S
@@ -88,11 +88,12 @@ multiply_overflow:
 #define RESTORE_FP(register,offset) lfd register,SAVE_AT(offset)(r1)
 
 #define SAVE_V(register,offset) \
-        li r2,SAVE_AT(offset) XX \
-        stvxl register,r2,r1
+        li r2,SAVE_AT(offset) XX \
+        stvxl register,r2,r1
+
 #define RESTORE_V(register,offset) \
-        li r2,SAVE_AT(offset) XX \
-        lvxl register,r2,r1
+        li r2,SAVE_AT(offset) XX \
+        lvxl register,r2,r1
 
 #define PROLOGUE \
         mflr r0 XX /* get caller's return address */ \
@@ -104,8 +105,6 @@ multiply_overflow:
         lwz r1,0(r1) XX /* destroy the stack frame */ \
         mtlr r0 /* get ready to return */
 
-
-
 /* We have to save and restore nonvolatile registers because
 the Factor compiler treats the entire register file as volatile. */
 DEF(void,c_to_factor,(CELL quot)):
@@ -288,44 +287,44 @@ DEF(void,flush_icache,(void *start, int len)):
         blr
 
 DEF(void,primitive_inline_cache_miss,(void)):
-        mflr r6
+        mflr r6
 DEF(void,primitive_inline_cache_miss_tail,(void)):
-        PROLOGUE
-        mr r3,r6
-        bl MANGLE(inline_cache_miss)
-        EPILOGUE
-        mtctr r3
-        bctr
+        PROLOGUE
+        mr r3,r6
+        bl MANGLE(inline_cache_miss)
+        EPILOGUE
+        mtctr r3
+        bctr
 
 DEF(void,get_ppc_fpu_env,(void*)):
-        mffs f0
-        stfd f0,0(r3)
-        blr
+        mffs f0
+        stfd f0,0(r3)
+        blr
 
 DEF(void,set_ppc_fpu_env,(const void*)):
-        lfd f0,0(r3)
-        mtfsf 0xff,f0
-        blr
+        lfd f0,0(r3)
+        mtfsf 0xff,f0
+        blr
 
 DEF(void,get_ppc_vmx_env,(void*)):
-        mfvscr v0
-        subi r4,r1,16
-        li r5,0xf
-        andc r4,r4,r5
-        stvxl v0,0,r4
-        li r5,0xc
-        lwzx r6,r5,r4
-        stw r6,0(r3)
-        blr
+        mfvscr v0
+        subi r4,r1,16
+        li r5,0xf
+        andc r4,r4,r5
+        stvxl v0,0,r4
+        li r5,0xc
+        lwzx r6,r5,r4
+        stw r6,0(r3)
+        blr
 
 DEF(void,set_ppc_vmx_env,(const void*)):
-        subi r4,r1,16
-        li r5,0xf
-        andc r4,r4,r5
-        li r5,0xc
-        lwz r6,0(r3)
-        stwx r6,r5,r4
-        lvxl v0,0,r4
-        mtvscr v0
-        blr
+        subi r4,r1,16
+        li r5,0xf
+        andc r4,r4,r5
+        li r5,0xc
+        lwz r6,0(r3)
+        stwx r6,r5,r4
+        lvxl v0,0,r4
+        mtvscr v0
+        blr
diff --git a/vm/cpu-x86.32.S b/vm/cpu-x86.32.S
index 0c4166cfe5..87a0e03f99 100644
--- a/vm/cpu-x86.32.S
+++ b/vm/cpu-x86.32.S
@@ -58,26 +58,26 @@ DEF(void,primitive_inline_cache_miss_tail,(void)):
         jmp *%eax
 
 DEF(void,get_sse_env,(void*)):
-        movl 4(%esp), %eax
-        stmxcsr (%eax)
-        ret
+        movl 4(%esp), %eax
+        stmxcsr (%eax)
+        ret
 
 DEF(void,set_sse_env,(const void*)):
-        movl 4(%esp), %eax
-        ldmxcsr (%eax)
-        ret
+        movl 4(%esp), %eax
+        ldmxcsr (%eax)
+        ret
 
 DEF(void,get_x87_env,(void*)):
-        movl 4(%esp), %eax
-        fnstsw (%eax)
-        fnstcw 2(%eax)
-        ret
+        movl 4(%esp), %eax
+        fnstsw (%eax)
+        fnstcw 2(%eax)
+        ret
 
 DEF(void,set_x87_env,(const void*)):
-        movl 4(%esp), %eax
-        fnclex
-        fldcw 2(%eax)
-        ret
+        movl 4(%esp), %eax
+        fnclex
+        fldcw 2(%eax)
+        ret
 
 #include "cpu-x86.S"
diff --git a/vm/cpu-x86.64.S b/vm/cpu-x86.64.S
index e6d9d88810..0da360e675 100644
--- a/vm/cpu-x86.64.S
+++ b/vm/cpu-x86.64.S
@@ -89,21 +89,21 @@ DEF(void,primitive_inline_cache_miss_tail,(void)):
         jmp *%rax
 
 DEF(void,get_sse_env,(void*)):
-        stmxcsr (%rdi)
-        ret
+        stmxcsr (%rdi)
+        ret
 
 DEF(void,set_sse_env,(const void*)):
-        ldmxcsr (%rdi)
-        ret
+        ldmxcsr (%rdi)
+        ret
 
 DEF(void,get_x87_env,(void*)):
-        fnstsw (%rdi)
-        fnstcw 2(%rdi)
-        ret
+        fnstsw (%rdi)
+        fnstcw 2(%rdi)
+        ret
 
 DEF(void,set_x87_env,(const void*)):
-        fnclex
-        fldcw 2(%rdi)
-        ret
+        fnclex
+        fldcw 2(%rdi)
+        ret
 
 #include "cpu-x86.S"
diff --git a/vm/cpu-x86.S b/vm/cpu-x86.S
index 09e742bed8..d229b2cb79 100644
--- a/vm/cpu-x86.S
+++ b/vm/cpu-x86.S
@@ -1,38 +1,38 @@
 DEF(void,primitive_fixnum_add,(void)):
-        mov (DS_REG),ARG0
-        mov -CELL_SIZE(DS_REG),ARG1
-        sub $CELL_SIZE,DS_REG
-        mov ARG1,ARITH_TEMP_1
-        add ARG0,ARITH_TEMP_1
-        jo MANGLE(overflow_fixnum_add)
-        mov ARITH_TEMP_1,(DS_REG)
-        ret
+        mov (DS_REG),ARG0
+        mov -CELL_SIZE(DS_REG),ARG1
+        sub $CELL_SIZE,DS_REG
+        mov ARG1,ARITH_TEMP_1
+        add ARG0,ARITH_TEMP_1
+        jo MANGLE(overflow_fixnum_add)
+        mov ARITH_TEMP_1,(DS_REG)
+        ret
 
 DEF(void,primitive_fixnum_subtract,(void)):
-        mov (DS_REG),ARG1
-        mov -CELL_SIZE(DS_REG),ARG0
-        sub $CELL_SIZE,DS_REG
-        mov ARG0,ARITH_TEMP_1
-        sub ARG1,ARITH_TEMP_1
-        jo MANGLE(overflow_fixnum_subtract)
-        mov ARITH_TEMP_1,(DS_REG)
-        ret
+        mov (DS_REG),ARG1
+        mov -CELL_SIZE(DS_REG),ARG0
+        sub $CELL_SIZE,DS_REG
+        mov ARG0,ARITH_TEMP_1
+        sub ARG1,ARITH_TEMP_1
+        jo MANGLE(overflow_fixnum_subtract)
+        mov ARITH_TEMP_1,(DS_REG)
+        ret
 
 DEF(void,primitive_fixnum_multiply,(void)):
-        mov (DS_REG),ARITH_TEMP_1
-        mov ARITH_TEMP_1,DIV_RESULT
-        mov -CELL_SIZE(DS_REG),ARITH_TEMP_2
-        sar $3,ARITH_TEMP_2
-        sub $CELL_SIZE,DS_REG
-        imul ARITH_TEMP_2
-        jo multiply_overflow
-        mov DIV_RESULT,(DS_REG)
-        ret
+        mov (DS_REG),ARITH_TEMP_1
+        mov ARITH_TEMP_1,DIV_RESULT
+        mov -CELL_SIZE(DS_REG),ARITH_TEMP_2
+        sar $3,ARITH_TEMP_2
+        sub $CELL_SIZE,DS_REG
+        imul ARITH_TEMP_2
+        jo multiply_overflow
+        mov DIV_RESULT,(DS_REG)
+        ret
 multiply_overflow:
-        sar $3,ARITH_TEMP_1
-        mov ARITH_TEMP_1,ARG0
-        mov ARITH_TEMP_2,ARG1
-        jmp MANGLE(overflow_fixnum_multiply)
+        sar $3,ARITH_TEMP_1
+        mov ARITH_TEMP_1,ARG0
+        mov ARITH_TEMP_2,ARG1
+        jmp MANGLE(overflow_fixnum_multiply)
 
 DEF(F_FASTCALL void,c_to_factor,(CELL quot)):
         PUSH_NONVOLATILE
@@ -77,38 +77,38 @@ DEF(F_FASTCALL void,lazy_jit_compile,(CELL quot)):
 DEF(bool,sse_version,(void)):
         mov $0x1,RETURN_REG
         cpuid
-        /* test $0x100000,%ecx
-        jnz sse_42
-        test $0x80000,%ecx
-        jnz sse_41
-        test $0x200,%ecx
-        jnz ssse_3 */
-        test $0x1,%ecx
-        jnz sse_3
-        test $0x4000000,%edx
-        jnz sse_2
-        test $0x2000000,%edx
-        jnz sse_1
-        mov $0,%eax
-        ret
+        /* test $0x100000,%ecx
+        jnz sse_42
+        test $0x80000,%ecx
+        jnz sse_41
+        test $0x200,%ecx
+        jnz ssse_3 */
+        test $0x1,%ecx
+        jnz sse_3
+        test $0x4000000,%edx
+        jnz sse_2
+        test $0x2000000,%edx
+        jnz sse_1
+        mov $0,%eax
+        ret
 sse_42:
-        mov $42,RETURN_REG
-        ret
+        mov $42,RETURN_REG
+        ret
 sse_41:
-        mov $41,RETURN_REG
-        ret
+        mov $41,RETURN_REG
+        ret
 ssse_3:
-        mov $33,RETURN_REG
-        ret
+        mov $33,RETURN_REG
+        ret
 sse_3:
-        mov $30,RETURN_REG
-        ret
+        mov $30,RETURN_REG
+        ret
 sse_2:
-        mov $20,RETURN_REG
-        ret
+        mov $20,RETURN_REG
+        ret
 sse_1:
-        mov $10,RETURN_REG
-        ret
+        mov $10,RETURN_REG
+        ret
 
 #ifdef WINDOWS
 .section .drectve
 .ascii " -export:sse_version"
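
Note (not part of the patch): the hunks above are whitespace-only, but the fixnum primitives they touch show how the VM does overflow-checked arithmetic on tagged integers. A fixnum cell keeps its payload shifted left by three tag bits with a zero tag, so primitive_fixnum_add adds the tagged cells directly, while primitive_fixnum_multiply untags one operand with sar $3 before imul; in both cases jo diverts to a handler that promotes the result to a bignum. Below is a minimal C sketch of the same scheme, assuming GCC/Clang overflow builtins; the names fixnum_add, fixnum_multiply, TAG_BITS and cell are illustrative stand-ins, not the VM's own identifiers.

#include <stdbool.h>
#include <stdint.h>

#define TAG_BITS 3 /* mirrors the `sar $3` untag in cpu-x86.S */

typedef intptr_t cell; /* a tagged fixnum: payload << TAG_BITS */

/* Tagged addition: both inputs carry zero low tag bits, so the machine
   add of the tagged values is already the tagged sum; report overflow
   instead of storing a wrapped result (the asm's `jo` path). */
bool fixnum_add(cell x, cell y, cell *result)
{
    return !__builtin_add_overflow(x, y, result);
}

/* Tagged multiplication: untag one operand first, so
   (x >> TAG_BITS) * y == (a * b) << TAG_BITS, i.e. the product comes
   out correctly tagged; on overflow the caller falls back to bignums. */
bool fixnum_multiply(cell x, cell y, cell *result)
{
    return !__builtin_mul_overflow(x >> TAG_BITS, y, result);
}

On the no-overflow path the assembly writes the result back to the data stack in place of the two operands; on overflow it jumps to the C overflow_fixnum_* helpers (untagging the remaining operand first in the multiply case).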