/*
 * linux/include/asm-arm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains ARM architecture-specific defines
 * for the various processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>

/*
 * Endian-independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define pull		lsr
#define push		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define pull		lsl
#define push		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
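
/*
 * For illustration only, a sketch of how pull/push are used: an
 * unaligned copy loop in the style of the arch/arm/lib copy routines,
 * here assuming a 1-byte source misalignment (register choice is an
 * assumption of the example).  The same source assembles correctly
 * for either endianness:
 *
 *	mov	r3, r7, pull #8		@ drop the byte already consumed
 *	ldr	r7, [r1], #4		@ fetch the next source word
 *	orr	r3, r3, r7, push #24	@ merge in the new word's low bytes
 *	str	r3, [r0], #4		@ store one realigned word
 */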

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
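
/*
 * Typical use, e.g. ahead of a copy loop (register and offsets are
 * illustrative); the pld instructions vanish entirely on architectures
 * older than ARMv5:
 *
 *	PLD(	pld	[r1, #0]	)
 *	PLD(	pld	[r1, #32]	)
 */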

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be worthwhile when the cache is not set
 * to write-allocate (this would need further testing on XScale when
 * WA is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)	code
#else
#define CALGN(code...)
#endif
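
/*
 * Sketch of typical use in a memcpy-style routine (registers and the
 * 32-byte line size are assumptions of the example): work out how far
 * the destination is into a cache line so the loop can align it first.
 * These lines assemble to nothing unless CONFIG_CPU_FEROCEON is set:
 *
 *	CALGN(	ands	ip, r0, #31	)	@ bytes into this line
 *	CALGN(	rsb	ip, ip, #32	)	@ bytes to the next line
 */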

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq
	cpsid	i
	.endm

	.macro	enable_irq
	cpsie	i
	.endm
#else
	.macro	disable_irq
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq
	msr	cpsr_c, #SVC_MODE
	.endm
#endif
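
/*
 * Both variants are used the same way (sketch only):
 *
 *	disable_irq			@ enter non-reentrant region
 *	...				@ work that must not be interrupted
 *	enable_irq			@ leave it again
 */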

/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled and that the processor is in SVC mode: the
 * pre-ARMv6 disable_irq above rewrites the whole CPSR control field,
 * so any other FIQ mask or mode setting would be lost.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm
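
/*
 * These two are normally used as a pair (a sketch; r9 stands in for
 * any free register):
 *
 *	save_and_disable_irqs r9	@ r9 := old CPSR, IRQs now masked
 *	...				@ code that must not be interrupted
 *	restore_irqs r9			@ put the old interrupt state back
 */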

/*
 * Mark an instruction which may fault when accessing user space.  The
 * instruction is emitted at a local label, and a matching exception
 * table entry is generated pointing at label 9001, which the user of
 * this macro must provide as the fault fixup.
 */
#define USER(x...)				\
9999:	x;					\
	.section __ex_table,"a";	\
	.align	3;			\
	.long	9999b,9001f;		\
	.previous
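
/*
 * Sketch of typical use (register choice and the error value are
 * assumptions of the example); if the ldrt faults on the user address,
 * the exception table redirects control to the 9001 fixup:
 *
 *	USER(	ldrt	r3, [r0]	)	@ may fault
 *	...
 * 9001:	mov	r0, #-EFAULT		@ fault: return an error
 *		mov	pc, lr
 */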