Commit f4815ac6 authored by Heiko Carstens, committed by Martin Schwidefsky

s390/headers: replace __s390x__ with CONFIG_64BIT where possible

Replace __s390x__ with CONFIG_64BIT in all places that are not exported
to userspace or guarded with #ifdef __KERNEL__.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent da477737
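
The change applied throughout the diff below is mechanical: in kernel-only code, a preprocessor guard on the compiler-defined __s390x__ macro becomes a guard on the Kconfig-derived CONFIG_64BIT symbol, while anything visible to userspace keeps __s390x__, because CONFIG_64BIT only exists when compiling against the kernel's own configuration headers. A minimal sketch of both cases follows; the macro names and values are illustrative only and are not taken from this patch:

/* Illustrative only -- EXAMPLE_* are hypothetical, not part of this commit. */

/* Before: kernel-internal code keyed off the compiler-defined target macro. */
#ifndef __s390x__
#define EXAMPLE_WORDSIZE 32
#else /* __s390x__ */
#define EXAMPLE_WORDSIZE 64
#endif /* __s390x__ */

/* After: the same code keyed off the kernel configuration symbol. */
#ifndef CONFIG_64BIT
#define EXAMPLE_WORDSIZE 32
#else /* CONFIG_64BIT */
#define EXAMPLE_WORDSIZE 64
#endif /* CONFIG_64BIT */

/*
 * A userspace-exported definition stays on __s390x__: user programs are
 * built without the kernel's autoconf.h, so CONFIG_64BIT is never defined
 * there and only __s390x__ can distinguish 31-bit from 64-bit builds.
 */
#ifndef __KERNEL__
# ifdef __s390x__
#  define EXAMPLE_USER_BITS 64
# else
#  define EXAMPLE_USER_BITS 32
# endif
#endif
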
......@@ -61,7 +61,7 @@ extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define __BITOPS_ALIGN 3
#define __BITOPS_WORDSIZE 32
......@@ -81,7 +81,7 @@ extern const char _sb_findmap[];
: "d" (__val), "Q" (*(unsigned long *) __addr) \
: "cc");
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define __BITOPS_ALIGN 7
#define __BITOPS_WORDSIZE 64
......@@ -101,7 +101,7 @@ extern const char _sb_findmap[];
: "d" (__val), "Q" (*(unsigned long *) __addr) \
: "cc");
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
......@@ -410,7 +410,7 @@ static inline unsigned long __ffz_word_loop(const unsigned long *addr,
unsigned long bytes = 0;
asm volatile(
#ifndef __s390x__
#ifndef CONFIG_64BIT
" ahi %1,-1\n"
" sra %1,5\n"
" jz 1f\n"
......@@ -447,7 +447,7 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr,
unsigned long bytes = 0;
asm volatile(
#ifndef __s390x__
#ifndef CONFIG_64BIT
" ahi %1,-1\n"
" sra %1,5\n"
" jz 1f\n"
......@@ -479,7 +479,7 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr,
*/
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef __s390x__
#ifdef CONFIG_64BIT
if ((word & 0xffffffff) == 0xffffffff) {
word >>= 32;
nr += 32;
......@@ -503,7 +503,7 @@ static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
*/
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef __s390x__
#ifdef CONFIG_64BIT
if ((word & 0xffffffff) == 0) {
word >>= 32;
nr += 32;
......@@ -544,7 +544,7 @@ static inline unsigned long __load_ulong_le(const unsigned long *p,
unsigned long word;
p = (unsigned long *)((unsigned long) p + offset);
#ifndef __s390x__
#ifndef CONFIG_64BIT
asm volatile(
" ic %0,%O1(%R1)\n"
" icm %0,2,%O1+1(%R1)\n"
......
......@@ -21,15 +21,15 @@ typedef unsigned long long __nocast cputime64_t;
static inline unsigned long __div(unsigned long long n, unsigned long base)
{
#ifndef __s390x__
#ifndef CONFIG_64BIT
register_pair rp;
rp.pair = n >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
return rp.subreg.odd;
#else /* __s390x__ */
#else /* CONFIG_64BIT */
return n / base;
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
}
#define cputime_one_jiffy jiffies_to_cputime(1)
......@@ -100,7 +100,7 @@ static inline void cputime_to_timespec(const cputime_t cputime,
struct timespec *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef __s390x__
#ifndef CONFIG_64BIT
register_pair rp;
rp.pair = __cputime >> 1;
......@@ -128,7 +128,7 @@ static inline void cputime_to_timeval(const cputime_t cputime,
struct timeval *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef __s390x__
#ifndef CONFIG_64BIT
register_pair rp;
rp.pair = __cputime >> 1;
......
......@@ -7,7 +7,7 @@
#ifndef __ASM_CTL_REG_H
#define __ASM_CTL_REG_H
#ifdef __s390x__
#ifdef CONFIG_64BIT
#define __ctl_load(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \
......@@ -25,7 +25,7 @@
: "i" (low), "i" (high)); \
})
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define __ctl_load(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \
......@@ -43,7 +43,7 @@
: "i" (low), "i" (high)); \
})
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define __ctl_set_bit(cr, bit) ({ \
unsigned long __dummy; \
......
......@@ -107,11 +107,11 @@
/*
* These are used to set parameters in the core dumps.
*/
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define ELF_CLASS ELFCLASS32
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define ELF_CLASS ELFCLASS64
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
......@@ -181,9 +181,9 @@ extern unsigned long elf_hwcap;
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define SET_PERSONALITY(ex) \
do { \
if (personality(current->personality) != PER_LINUX32) \
......@@ -194,7 +194,7 @@ do { \
else \
clear_thread_flag(TIF_31BIT); \
} while (0)
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define STACK_RND_MASK 0x7ffUL
......
......@@ -20,7 +20,7 @@
#include <asm/cio.h>
#include <asm/uaccess.h>
#ifdef __s390x__
#ifdef CONFIG_64BIT
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
#else
#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
......@@ -33,7 +33,7 @@
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
#ifdef __s390x__
#ifdef CONFIG_64BIT
return ((__pa(vaddr) + length - 1) >> 31) != 0;
#else
return 0;
......@@ -78,7 +78,7 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
static inline int
set_normalized_cda(struct ccw1 * ccw, void *vaddr)
{
#ifdef __s390x__
#ifdef CONFIG_64BIT
unsigned int nridaws;
unsigned long *idal;
......@@ -105,7 +105,7 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
static inline void
clear_normalized_cda(struct ccw1 * ccw)
{
#ifdef __s390x__
#ifdef CONFIG_64BIT
if (ccw->flags & CCW_FLAG_IDA) {
kfree((void *)(unsigned long) ccw->cda);
ccw->flags &= ~CCW_FLAG_IDA;
......@@ -182,7 +182,7 @@ idal_buffer_free(struct idal_buffer *ib)
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
#ifdef __s390x__
#ifdef CONFIG_64BIT
return ib->size > (4096ul << ib->page_order) ||
idal_is_needed(ib->data[0], ib->size);
#else
......
......@@ -49,7 +49,7 @@ static inline int init_new_context(struct task_struct *tsk,
#define destroy_context(mm) do { } while (0)
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
......
......@@ -28,7 +28,7 @@ struct mod_arch_specific
struct mod_arch_syminfo *syminfo;
};
#ifdef __s390x__
#ifdef CONFIG_64BIT
#define ElfW(x) Elf64_ ## x
#define ELFW(x) ELF64_ ## x
#else
......
......@@ -15,7 +15,7 @@
* per cpu area, use weak definitions to force the compiler to
* generate external references.
*/
#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE)
#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
#define ARCH_NEEDS_WEAK_PER_CPU
#endif
......
......@@ -48,7 +48,7 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
clear_table(crst, entry, sizeof(unsigned long)*2048);
}
#ifndef __s390x__
#ifndef CONFIG_64BIT
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
......@@ -64,7 +64,7 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
#define pgd_populate(mm, pgd, pud) BUG()
#define pud_populate(mm, pud, pmd) BUG()
#else /* __s390x__ */
#else /* CONFIG_64BIT */
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
......@@ -106,7 +106,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
......
......@@ -74,15 +74,15 @@ static inline int is_zero_pfn(unsigned long pfn)
* table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
*/
#ifndef __s390x__
#ifndef CONFIG_64BIT
# define PMD_SHIFT 20
# define PUD_SHIFT 20
# define PGDIR_SHIFT 20
#else /* __s390x__ */
#else /* CONFIG_64BIT */
# define PMD_SHIFT 20
# define PUD_SHIFT 31
# define PGDIR_SHIFT 42
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
......@@ -98,13 +98,13 @@ static inline int is_zero_pfn(unsigned long pfn)
* that leads to 1024 pte per pgd
*/
#define PTRS_PER_PTE 256
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD 1
#define PTRS_PER_PUD 1
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD 2048
#define PTRS_PER_PUD 2048
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD 2048
#define FIRST_USER_ADDRESS 0
......@@ -276,7 +276,7 @@ extern struct page *vmemmap;
* swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
*/
#ifndef __s390x__
#ifndef CONFIG_64BIT
/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
......@@ -308,7 +308,7 @@ extern struct page *vmemmap;
#define KVM_UR_BIT 0x00008000UL
#define KVM_UC_BIT 0x00004000UL
#else /* __s390x__ */
#else /* CONFIG_64BIT */
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
......@@ -363,7 +363,7 @@ extern struct page *vmemmap;
#define KVM_UR_BIT 0x0000800000000000UL
#define KVM_UC_BIT 0x0000400000000000UL
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
/*
* A user page table pointer has the space-switch-event bit, the
......@@ -424,7 +424,7 @@ static inline int mm_has_pgste(struct mm_struct *mm)
/*
* pgd/pmd/pte query functions
*/
#ifndef __s390x__
#ifndef CONFIG_64BIT
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
......@@ -434,7 +434,7 @@ static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
#else /* __s390x__ */
#else /* CONFIG_64BIT */
static inline int pgd_present(pgd_t pgd)
{
......@@ -490,7 +490,7 @@ static inline int pud_bad(pud_t pud)
return (pud_val(pud) & mask) != 0;
}
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
static inline int pmd_present(pmd_t pmd)
{
......@@ -741,7 +741,7 @@ static inline int pte_young(pte_t pte)
static inline void pgd_clear(pgd_t *pgd)
{
#ifdef __s390x__
#ifdef CONFIG_64BIT
if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
......@@ -749,7 +749,7 @@ static inline void pgd_clear(pgd_t *pgd)
static inline void pud_clear(pud_t *pud)
{
#ifdef __s390x__
#ifdef CONFIG_64BIT
if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
......@@ -921,7 +921,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
#ifndef CONFIG_64BIT
/* pto must point to the start of the segment table */
pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
......@@ -1116,7 +1116,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
......@@ -1125,7 +1125,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
......@@ -1147,7 +1147,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
return pmd + pmd_index(address);
}
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
......@@ -1196,7 +1196,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
* 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
* 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
*/
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
......@@ -1217,11 +1217,11 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#ifndef __s390x__
#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS 26
#else /* __s390x__ */
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS 59
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define pte_to_pgoff(__pte) \
((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
......
......@@ -39,27 +39,27 @@ extern int sysctl_ieee_emulation_warnings;
/*
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
*/
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define TASK_SIZE (1UL << 31)
#define TASK_UNMAPPED_BASE (1UL << 30)
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
(1UL << 30) : (1UL << 41))
#define TASK_SIZE TASK_SIZE_OF(current)
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define STACK_TOP (1UL << 31)
#define STACK_TOP_MAX (1UL << 31)
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
#define STACK_TOP_MAX (1UL << 42)
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define HAVE_ARCH_PICK_MMAP_LAYOUT
......@@ -179,7 +179,7 @@ static inline void psw_set_key(unsigned int key)
*/
static inline void __load_psw(psw_t psw)
{
#ifndef __s390x__
#ifndef CONFIG_64BIT
asm volatile("lpsw %0" : : "Q" (psw) : "cc");
#else
asm volatile("lpswe %0" : : "Q" (psw) : "cc");
......@@ -197,7 +197,7 @@ static inline void __load_psw_mask (unsigned long mask)
psw.mask = mask;
#ifndef __s390x__
#ifndef CONFIG_64BIT
asm volatile(
" basr %0,0\n"
"0: ahi %0,1f-0b\n"
......@@ -205,14 +205,14 @@ static inline void __load_psw_mask (unsigned long mask)
" lpsw %1\n"
"1:"
: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
#else /* __s390x__ */
#else /* CONFIG_64BIT */
asm volatile(
" larl %0,1f\n"
" stg %0,%O1+8(%R1)\n"
" lpswe %1\n"
"1:"
: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
}
/*
......@@ -220,7 +220,7 @@ static inline void __load_psw_mask (unsigned long mask)
*/
static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
{
#ifndef __s390x__
#ifndef CONFIG_64BIT
if (psw.addr & PSW_ADDR_AMODE)
/* 31 bit mode */
return (psw.addr - ilc) | PSW_ADDR_AMODE;
......@@ -250,7 +250,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
* Store status and then load disabled wait psw,
* the processor is dead afterwards
*/
#ifndef __s390x__
#ifndef CONFIG_64BIT
asm volatile(
" stctl 0,0,0(%2)\n"
" ni 0(%2),0xef\n" /* switch off protection */
......@@ -269,7 +269,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
" lpsw 0(%1)"
: "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
#else /* __s390x__ */
#else /* CONFIG_64BIT */
asm volatile(
" stctg 0,0,0(%2)\n"
" ni 4(%2),0xef\n" /* switch off protection */
......@@ -302,7 +302,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
" lpswe 0(%1)"
: "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
while (1);
}
......@@ -338,7 +338,7 @@ extern void (*s390_base_ext_handler_fn)(void);
/*
* Helper macro for exception table entries
*/
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define EX_TABLE(_fault,_target) \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
......
......@@ -41,17 +41,17 @@
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
......@@ -63,19 +63,19 @@ static inline void __down_read(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
#ifndef __s390x__
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ahi %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* __s390x__ */
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
......@@ -91,7 +91,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
#ifndef __s390x__
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: ltr %1,%0\n"
" jm 1f\n"
......@@ -99,7 +99,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
" cs %0,%1,%2\n"
" jl 0b\n"
"1:"
#else /* __s390x__ */
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: ltgr %1,%0\n"
" jm 1f\n"
......@@ -107,7 +107,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
" csg %0,%1,%2\n"
" jl 0b\n"
"1:"
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
......@@ -123,19 +123,19 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
tmp = RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
#ifndef __s390x__
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" a %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* __s390x__ */
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
......@@ -156,19 +156,19 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
signed long old;
asm volatile(
#ifndef __s390x__
#ifndef CONFIG_64BIT
" l %0,%1\n"
"0: ltr %0,%0\n"
" jnz 1f\n"
" cs %0,%3,%1\n"
" jl 0b\n"
#else /* __s390x__ */
#else /* CONFIG_64BIT */
" lg %0,%1\n"
"0: ltgr %0,%0\n"
" jnz 1f\n"
" csg %0,%3,%1\n"
" jl 0b\n"
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
"1:"
: "=&d" (old), "=Q" (sem->count)
: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
......@@ -184,19 +184,19 @@ static inline void __up_read(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
#ifndef __s390x__
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ahi %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* __s390x__ */
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
......@@ -214,19 +214,19 @@ static inline void __up_write(struct rw_semaphore *sem)
tmp = -RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
#ifndef __s390x__
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" a %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* __s390x__ */
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
......@@ -244,19 +244,19 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
tmp = -RWSEM_WAITING_BIAS;
asm volatile(
#ifndef __s390x__
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" a %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* __s390x__ */
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
......@@ -272,19 +272,19 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
signed long old, new;
asm volatile(
#ifndef __s390x__
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ar %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* __s390x__ */
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
......@@ -298,19 +298,19 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
signed long old, new;
asm volatile(
#ifndef __s390x__
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ar %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* __s390x__ */
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
......
......@@ -22,19 +22,19 @@
#include <asm/lowcore.h>
#include <asm/types.h>
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define IPL_DEVICE (*(unsigned long *) (0x10404))
#define INITRD_START (*(unsigned long *) (0x1040C))
#define INITRD_SIZE (*(unsigned long *) (0x10414))
#define OLDMEM_BASE (*(unsigned long *) (0x1041C))
#define OLDMEM_SIZE (*(unsigned long *) (0x10424))
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define IPL_DEVICE (*(unsigned long *) (0x10400))
#define INITRD_START (*(unsigned long *) (0x10408))
#define INITRD_SIZE (*(unsigned long *) (0x10410))
#define OLDMEM_BASE (*(unsigned long *) (0x10418))
#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define COMMAND_LINE ((char *) (0x10480))
#define CHUNK_READ_WRITE 0
......@@ -89,7 +89,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
#define MACHINE_HAS_IDTE (0)
......@@ -100,7 +100,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_PFMF (0)
#define MACHINE_HAS_SPP (0)
#define MACHINE_HAS_TOPOLOGY (0)
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
......@@ -111,7 +111,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define ZFCPDUMP_HSA_SIZE (32UL<<20)
#define ZFCPDUMP_HSA_SIZE_MAX (64UL<<20)
......@@ -153,19 +153,19 @@ extern void (*_machine_power_off)(void);
#else /* __ASSEMBLY__ */
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define IPL_DEVICE 0x10404
#define INITRD_START 0x1040C
#define INITRD_SIZE 0x10414
#define OLDMEM_BASE 0x1041C
#define OLDMEM_SIZE 0x10424
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#define IPL_DEVICE 0x10400
#define INITRD_START 0x10408
#define INITRD_SIZE 0x10410
#define OLDMEM_BASE 0x10418
#define OLDMEM_SIZE 0x10420
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define COMMAND_LINE 0x10480
#endif /* __ASSEMBLY__ */
......
......@@ -51,7 +51,7 @@
wl = __wl; \
})
#ifdef __s390x__
#ifdef CONFIG_64BIT
#define udiv_qrnnd(q, r, n1, n0, d) \
do { unsigned long __n; \
unsigned int __r, __d; \
......
......@@ -12,10 +12,10 @@
/*
* Size of kernel stack for each process
*/
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
#else /* __s390x__ */
#else /* CONFIG_64BIT */
#ifndef __SMALL_STACK
#define THREAD_ORDER 2
#define ASYNC_ORDER 2
......@@ -23,7 +23,7 @@
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
#endif
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
......
......@@ -106,7 +106,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
#ifdef __s390x__
#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 31))
return;
if (!tlb->fullmm)
......@@ -125,7 +125,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
#ifdef __s390x__
#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 42))
return;
if (!tlb->fullmm)
......
......@@ -27,12 +27,12 @@ static inline void __tlb_flush_global(void)
register unsigned long reg4 asm("4");
long dummy;
#ifndef __s390x__
#ifndef CONFIG_64BIT
if (!MACHINE_HAS_CSP) {
smp_ptlb_all();
return;
}
#endif /* __s390x__ */
#endif /* CONFIG_64BIT */
dummy = 0;
reg2 = reg3 = 0;
......
......@@ -28,7 +28,7 @@ typedef __signed__ long saddr_t;
#ifndef __ASSEMBLY__
#ifndef __s390x__
#ifndef CONFIG_64BIT
typedef union {
unsigned long long pair;
struct {
......@@ -37,7 +37,7 @@ typedef union {
} subreg;
} register_pair;
#endif /* ! __s390x__ */
#endif /* ! CONFIG_64BIT */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _S390_TYPES_H */
......@@ -14,7 +14,7 @@
#include <asm/futex.h>
#include "uaccess.h"
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define AHI "ahi"
#define ALR "alr"
#define CLR "clr"
......
......@@ -15,7 +15,7 @@
#include <asm/futex.h>
#include "uaccess.h"
#ifndef __s390x__
#ifndef CONFIG_64BIT
#define AHI "ahi"
#define ALR "alr"
#define CLR "clr"
......
......@@ -109,7 +109,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pm_dir = pmd_offset(pu_dir, address);
#ifdef __s390x__
#ifdef CONFIG_64BIT
if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
(address + HPAGE_SIZE <= start + size) &&
(address >= HPAGE_SIZE)) {
......
......@@ -211,7 +211,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
sccb.evbuf.event_qual = EQ_STORE_DATA;
sccb.evbuf.data_id = DI_FCP_DUMP;
sccb.evbuf.event_id = 4712;
#ifdef __s390x__
#ifdef CONFIG_64BIT
sccb.evbuf.asa_size = ASA_SIZE_64;
#else
sccb.evbuf.asa_size = ASA_SIZE_32;
......