linux-arm / jg-open

Commit 88b42a00
authored Oct 19, 2021 by Joey Gouly
WIP: convert inline asm to named operands
Signed-off-by: Joey Gouly <joey.gouly@arm.com>
parent d9abdee5

Changes 9
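For readers unfamiliar with the syntax being introduced: GCC/Clang extended asm lets an operand be given a symbolic name by prefixing its constraint with [name]; the template then refers to it as %[name] instead of the positional %0, %1, ... A minimal standalone sketch of the difference (not part of this commit; the helper names are made up):

static inline unsigned long add_positional(unsigned long a, unsigned long b)
{
	unsigned long res;

	/* Operands are referenced by position: %0 is the output, %1 and %2 the inputs. */
	asm("add %0, %1, %2" : "=r" (res) : "r" (a), "r" (b));
	return res;
}

static inline unsigned long add_named(unsigned long a, unsigned long b)
{
	unsigned long res;

	/*
	 * Same instruction, but each constraint declares a name in brackets and
	 * the template uses %[name], so adding or reordering operands later
	 * cannot silently shift which operand the template picks up.
	 */
	asm("add %[res], %[a], %[b]"
	    : [res] "=r" (res)
	    : [a] "r" (a), [b] "r" (b));
	return res;
}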
arch/arm64/include/asm/compiler.h
@@ -32,9 +32,9 @@
  */
 #define function_nocfi(x) ({					\
 	void *addr;						\
-	asm("adrp %0, " __stringify(x) "\n\t"			\
-	    "add %0, %0, :lo12:" __stringify(x)			\
-	    : "=r" (addr));					\
+	asm("adrp %[addr], " __stringify(x) "\n\t"		\
+	    "add %[addr], %[addr], :lo12:" __stringify(x)	\
+	    : [addr] "=r" (addr));				\
 	addr;							\
 })
 #endif
arch/arm64/include/asm/kvm_mmu.h
@@ -125,13 +125,13 @@ void kvm_apply_hyp_relocations(void);
 static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
-	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
-				    "ror %0, %0, #1\n"
-				    "add %0, %0, #0\n"
-				    "add %0, %0, #0, lsl 12\n"
-				    "ror %0, %0, #63\n",
+	asm volatile(ALTERNATIVE_CB("and %[v], %[v], #1\n"
+				    "ror %[v], %[v], #1\n"
+				    "add %[v], %[v], #0\n"
+				    "add %[v], %[v], #0, lsl 12\n"
+				    "ror %[v], %[v], #63\n",
 				    kvm_update_va_mask)
-		     : "+r" (v));
+		     : [v] "+r" (v));

 	return v;
 }
arch/arm64/include/asm/mte-kasan.h
@@ -31,8 +31,8 @@ static inline u8 mte_get_ptr_tag(void *ptr)
 /* Get allocation tag for the address. */
 static inline u8 mte_get_mem_tag(void *addr)
 {
-	asm(__MTE_PREAMBLE "ldg %0, [%0]"
-	    : "+r" (addr));
+	asm(__MTE_PREAMBLE "ldg %[addr], [%[addr]]"
+	    : [addr] "+r" (addr));

 	return mte_get_ptr_tag(addr);
 }
@@ -42,16 +42,16 @@ static inline u8 mte_get_random_tag(void)
 {
 	void *addr;

-	asm(__MTE_PREAMBLE "irg %0, %0"
-	    : "=r" (addr));
+	asm(__MTE_PREAMBLE "irg %[addr], %[addr]"
+	    : [addr] "=r" (addr));

 	return mte_get_ptr_tag(addr);
 }

 static inline u64 __stg_post(u64 p)
 {
-	asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
-		     : "+r" (p)
+	asm volatile(__MTE_PREAMBLE "stg %[p], [%[p]], #16"
+		     : [p] "+r" (p)
 		     :
 		     : "memory");

 	return p;
@@ -59,8 +59,8 @@ static inline u64 __stg_post(u64 p)
 static inline u64 __stzg_post(u64 p)
 {
-	asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
-		     : "+r" (p)
+	asm volatile(__MTE_PREAMBLE "stzg %[p], [%[p]], #16"
+		     : [p] "+r" (p)
 		     :
 		     : "memory");

 	return p;
@@ -68,12 +68,12 @@ static inline u64 __stzg_post(u64 p)
 static inline void __dc_gva(u64 p)
 {
-	asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r" (p) : "memory");
+	asm volatile(__MTE_PREAMBLE "dc gva, %[p]" : : [p] "r" (p) : "memory");
 }

 static inline void __dc_gzva(u64 p)
 {
-	asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r" (p) : "memory");
+	asm volatile(__MTE_PREAMBLE "dc gzva, %[p]" : : [p] "r" (p) : "memory");
 }

 /*
arch/arm64/include/asm/percpu.h
@@ -13,10 +13,10 @@
 static inline void set_my_cpu_offset(unsigned long off)
 {
-	asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
-				 "msr tpidr_el2, %0",
+	asm volatile(ALTERNATIVE("msr tpidr_el1, %[off]",
+				 "msr tpidr_el2, %[off]",
 				 ARM64_HAS_VIRT_HOST_EXTN)
-			:: "r" (off) : "memory");
+			:: [off] "r" (off) : "memory");
 }

 static inline unsigned long __hyp_my_cpu_offset(void)
@@ -36,10 +36,10 @@ static inline unsigned long __kern_my_cpu_offset(void)
 	 * We want to allow caching the value, so avoid using volatile and
 	 * instead use a fake stack read to hazard against barrier().
 	 */
-	asm(ALTERNATIVE("mrs %0, tpidr_el1",
-			"mrs %0, tpidr_el2",
+	asm(ALTERNATIVE("mrs %[off], tpidr_el1",
+			"mrs %[off], tpidr_el2",
 			ARM64_HAS_VIRT_HOST_EXTN)
-		: "=r" (off) :
+		: [off] "=r" (off) :
 		"Q" (*(const unsigned long *)current_stack_pointer));

 	return off;
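Aside (not part of the commit): the "fake stack read" mentioned in the comment above relies on the asm not being volatile, so the compiler may cache and reuse its result, while the dummy "Q" memory input ties it to a stack slot; any compiler barrier with a "memory" clobber (such as barrier()) then forces the value to be re-read. A rough standalone sketch of the same pattern, with a hypothetical helper name:

static inline unsigned long read_tpidr_cacheable(const unsigned long *stack_slot)
{
	unsigned long val;

	/*
	 * Deliberately not volatile: repeated calls may be merged by the
	 * compiler.  The dummy memory input means a "memory" clobber in
	 * between (e.g. barrier()) invalidates the cached result.
	 */
	asm("mrs %[val], tpidr_el1"
	    : [val] "=r" (val)
	    : "Q" (*stack_slot));

	return val;
}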
arch/arm64/include/asm/sysreg.h
@@ -1251,7 +1251,7 @@
  */
 #define read_sysreg(r) ({						\
 	u64 __val;							\
-	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
+	asm volatile("mrs %[val], " __stringify(r) : [val] "=r" (__val)); \
 	__val;								\
 })
@@ -1271,7 +1271,7 @@
  */
 #define read_sysreg_s(r) ({						\
 	u64 __val;							\
-	asm volatile(__mrs_s("%0", r) : "=r" (__val));			\
+	asm volatile(__mrs_s("%[val]", r) : [val] "=r" (__val));	\
 	__val;								\
 })
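Illustrative usage, unchanged by this patch: read_sysreg() takes the register name as a bare token, for example:

	u64 freq = read_sysreg(cntfrq_el0);	/* counter-timer frequency */
	u64 el = read_sysreg(CurrentEL);	/* current exception level field */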
arch/arm64/include/asm/tlbflush.h
@@ -37,12 +37,12 @@
 		   : : )

 #define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE		       \
-			       "tlbi " #op ", %0\n"		       \
+			       "tlbi " #op ", %[arrg]\n"	       \
 		   ALTERNATIVE("nop\n nop",			       \
-			       "dsb ish\n tlbi " #op ", %0",	       \
+			       "dsb ish\n tlbi " #op ", %[arrg]",      \
 			       ARM64_WORKAROUND_REPEAT_TLBI,	       \
 			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)    \
-		   : : "r" (arg))
+		   : : [arrg] "r" (arg))

 #define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)
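Aside (not part of the commit): the operand is presumably spelled [arrg] rather than [arg] because arg is a parameter of the macro; the preprocessor would rewrite a bare [arg] in the constraint list but not the "%[arg]" inside the string literal, so the names would stop matching. A hypothetical macro showing the usual workaround of picking an operand name that is not a parameter name:

/* Hypothetical example, not kernel code: the operand name [val] is distinct
 * from the macro parameter 'arg', so preprocessing cannot break the match
 * between the constraint name and the %[val] reference in the template.
 */
#define write_ttbr0(arg) \
	asm volatile("msr ttbr0_el1, %[val]" : : [val] "r" (arg) : "memory")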
arch/arm64/include/asm/uaccess.h
@@ -53,7 +53,7 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
 	// 1: X = A + B; X' = X % 2^64
 	"	adds	%0, %3, %2\n"
 	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
-	"	csel	%1, xzr, %1, hi\n"
+	"	csel	%[limit], xzr, %[limit], hi\n"
 	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
 	//    to compensate for the carry flag being set in step 4. For
 	//    X > 2^64, X' merely has to remain nonzero, which it does.
@@ -63,7 +63,7 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
 	//    testing X' - C == 0, subject to the previous adjustments.
 	"	sbcs	xzr, %0, %1\n"
 	"	cset	%0, ls\n"
-	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");
+	: "=&r" (ret), [limit] "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

 	return ret;
 }
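Aside (not part of the commit): the numbered comments above describe an overflow-safe "addr + size <= limit" test. A rough plain-C rendering of the same intent (ignoring the asm's precise handling of the 2^64 boundary that the comments walk through) might look like:

static inline unsigned long range_ok_sketch(unsigned long addr,
					    unsigned long size,
					    unsigned long limit)
{
	unsigned long end = addr + size;	/* may wrap: the asm's X' */

	/* A wrapped sum can never fit below limit. */
	if (end < addr)
		return 0;

	return end <= limit;
}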
arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -149,7 +149,7 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
 	 * where __arch_get_vdso_data() is called, and then keep the result in
 	 * a register.
 	 */
-	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));
+	asm volatile("mov %[ret], %[data]" : [ret] "=r"(ret) : [data] "r"(_vdso_data));

 	return ret;
 }
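Aside (not part of the commit): the comment above refers to passing the address through a register-to-register asm so the compiler materialises it once and keeps it in a register instead of re-deriving it at every use. A generic sketch of that pattern, with a made-up helper name:

static inline const void *keep_in_register(const void *p)
{
	const void *ret;

	/*
	 * volatile plus an opaque mov: the compiler cannot look through the
	 * asm, so it computes the address once and reuses the result.
	 */
	asm volatile("mov %[ret], %[p]"
		     : [ret] "=r" (ret)
		     : [p] "r" (p));
	return ret;
}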
@@ -161,7 +161,7 @@ const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
 	const struct vdso_data *ret;

 	/* See __arch_get_vdso_data(). */
-	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_timens_data));
+	asm volatile("mov %[ret], %[data]" : [ret] "=r"(ret) : [data] "r"(_timens_data));

 	return ret;
 }
arch/arm64/include/asm/vdso/gettimeofday.h
@@ -82,7 +82,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
 	 * is speculated.
 	 */
 	isb();
-	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
+	asm volatile("mrs %[res], cntvct_el0" : [res] "=r" (res) :: "memory");
 	arch_counter_enforce_ordering(res);

 	return res;