Commit bd4f203e authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge third patch-bomb from Andrew Morton:
 "We're pretty much done over here - I'm still waiting for a nouveau
  merge so I can cleanly finish up Christoph's dma-mapping rework.

   - bunch of small misc stuff

   - fold abs64() into abs(), remove abs64()

   - new_valid_dev() cleanups

   - binfmt_elf_fdpic feature work"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (24 commits)
  fs/binfmt_elf_fdpic.c: provide NOMMU loader for regular ELF binaries
  fs/stat.c: remove unnecessary new_valid_dev() check
  fs/reiserfs/namei.c: remove unnecessary new_valid_dev() check
  fs/nilfs2/namei.c: remove unnecessary new_valid_dev() check
  fs/ncpfs/dir.c: remove unnecessary new_valid_dev() check
  fs/jfs: remove unnecessary new_valid_dev() checks
  fs/hpfs/namei.c: remove unnecessary new_valid_dev() check
  fs/f2fs/namei.c: remove unnecessary new_valid_dev() check
  fs/ext2/namei.c: remove unnecessary new_valid_dev() check
  fs/exofs/namei.c: remove unnecessary new_valid_dev() check
  fs/btrfs/inode.c: remove unnecessary new_valid_dev() check
  fs/9p: remove unnecessary new_valid_dev() checks
  include/linux/kdev_t.h: old/new_valid_dev() can return bool
  include/linux/kdev_t.h: remove unused huge_valid_dev()
  kmap_atomic_to_page() has no users, remove it
  drivers/scsi/cxgbi: fix build with EXTRA_CFLAGS
  dma: remove external references to dma_supported
  Documentation/sysctl/vm.txt: fix misleading code reference of overcommit_memory
  remove abs64()
  kernel.h: make abs() work with 64-bit types
  ...
......@@ -141,19 +141,6 @@ memory back to the pool before you destroy it.
Part Ic - DMA addressing limitations
------------------------------------
int
dma_supported(struct device *dev, u64 mask)
Checks to see if the device can support DMA to the memory described by
mask.
Returns: 1 if it can and 0 if it can't.
Notes: This routine merely tests to see if the mask is possible. It
won't change the current mask settings. It is more intended as an
internal API for use by the platform than an external API for use by
driver writers.
int
dma_set_mask_and_coherent(struct device *dev, u64 mask)
......
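(Since dma_supported() is documented above as an internal platform API rather than something driver writers should call, the driver-facing way to express an addressing limit is dma_set_mask_and_coherent(). A minimal sketch, not part of this patch — the helper name is hypothetical, only the two kernel calls are real API:)

```c
#include <linux/dma-mapping.h>

/*
 * Sketch only: typical probe-time mask negotiation. The dma_supported()
 * check happens internally on behalf of dma_set_mask_and_coherent().
 */
static int example_setup_dma_mask(struct device *dev)
{
	/* Try full 64-bit addressing first ... */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;

	/* ... and fall back to 32-bit DMA if the platform refuses. */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
```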
......@@ -639,7 +639,7 @@ and don't use much of it.
The default value is 0.
See Documentation/vm/overcommit-accounting and
security/commoncap.c::cap_vm_enough_memory() for more information.
mm/mmap.c::__vm_enough_memory() for more information.
==============================================================
......
......@@ -68,7 +68,6 @@ extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(const void *ptr);
#endif
#endif
......@@ -147,13 +147,3 @@ void *kmap_atomic_pfn(unsigned long pfn)
return (void *)vaddr;
}
struct page *kmap_atomic_to_page(const void *ptr)
{
unsigned long vaddr = (unsigned long)ptr;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
return pte_page(get_fixmap_pte(vaddr));
}
......@@ -62,8 +62,6 @@ extern void kunmap_high(struct page *page);
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern struct page *kmap_atomic_to_page(void *ptr);
#endif /* !__ASSEMBLY__ */
/*
......
......@@ -32,11 +32,6 @@ void kunmap(struct page *page)
EXPORT_SYMBOL(kunmap);
struct page *kmap_atomic_to_page(void *ptr)
{
return virt_to_page(ptr);
}
void *kmap_atomic(struct page *page)
{
unsigned long paddr;
......
......@@ -56,7 +56,6 @@ extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(void *ptr);
#endif
#endif
......@@ -111,20 +111,6 @@ void *kmap_atomic_pfn(unsigned long pfn)
return (void *)vaddr;
}
struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long vaddr = (unsigned long)ptr;
int idx;
pte_t *pte;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
idx = virt_to_fix(vaddr);
pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
return pte_page(*pte);
}
void __init kmap_init(void)
{
unsigned long kmap_vstart;
......
......@@ -76,19 +76,6 @@ static inline void *kmap_atomic(struct page *page)
return kmap_atomic_prot(page, kmap_prot);
}
static inline struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long) ptr;
pte_t *pte;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
idx = virt_to_fix(vaddr);
pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
return pte_page(*pte);
}
#define flush_cache_kmaps() { flush_icache(); flush_dcache(); }
#endif /* __KERNEL__ */
......
......@@ -49,7 +49,6 @@ extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() flush_cache_all()
......
......@@ -379,16 +379,17 @@
#define __NR_execveat (__NR_Linux + 356)
#define __NR_userfaultfd (__NR_Linux + 357)
#define __NR_membarrier (__NR_Linux + 358)
#define __NR_mlock2 (__NR_Linux + 359)
/*
* Offset of the last Linux o32 flavoured syscall
*/
#define __NR_Linux_syscalls 358
#define __NR_Linux_syscalls 359
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
#define __NR_O32_Linux_syscalls 358
#define __NR_O32_Linux_syscalls 359
#if _MIPS_SIM == _MIPS_SIM_ABI64
......@@ -715,16 +716,17 @@
#define __NR_execveat (__NR_Linux + 316)
#define __NR_userfaultfd (__NR_Linux + 317)
#define __NR_membarrier (__NR_Linux + 318)
#define __NR_mlock2 (__NR_Linux + 319)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
#define __NR_Linux_syscalls 318
#define __NR_Linux_syscalls 319
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
#define __NR_64_Linux_syscalls 318
#define __NR_64_Linux_syscalls 319
#if _MIPS_SIM == _MIPS_SIM_NABI32
......@@ -1055,15 +1057,16 @@
#define __NR_execveat (__NR_Linux + 320)
#define __NR_userfaultfd (__NR_Linux + 321)
#define __NR_membarrier (__NR_Linux + 322)
#define __NR_mlock2 (__NR_Linux + 323)
/*
* Offset of the last N32 flavoured syscall
*/
#define __NR_Linux_syscalls 322
#define __NR_Linux_syscalls 323
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
#define __NR_N32_Linux_syscalls 322
#define __NR_N32_Linux_syscalls 323
#endif /* _UAPI_ASM_UNISTD_H */
......@@ -594,3 +594,4 @@ EXPORT(sys_call_table)
PTR sys_execveat
PTR sys_userfaultfd
PTR sys_membarrier
PTR sys_mlock2
......@@ -432,4 +432,5 @@ EXPORT(sys_call_table)
PTR sys_execveat
PTR sys_userfaultfd
PTR sys_membarrier
PTR sys_mlock2
.size sys_call_table,.-sys_call_table
......@@ -422,4 +422,5 @@ EXPORT(sysn32_call_table)
PTR compat_sys_execveat /* 6320 */
PTR sys_userfaultfd
PTR sys_membarrier
PTR sys_mlock2
.size sysn32_call_table,.-sysn32_call_table
......@@ -577,4 +577,5 @@ EXPORT(sys32_call_table)
PTR compat_sys_execveat
PTR sys_userfaultfd
PTR sys_membarrier
PTR sys_mlock2
.size sys32_call_table,.-sys32_call_table
......@@ -118,19 +118,6 @@ void *kmap_atomic_pfn(unsigned long pfn)
return (void*) vaddr;
}
struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long)ptr;
pte_t *pte;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
idx = virt_to_fix(vaddr);
pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
return pte_page(*pte);
}
void __init kmap_init(void)
{
unsigned long kmap_vstart;
......
......@@ -156,7 +156,6 @@ static inline void __kunmap_atomic(void *addr)
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#endif /* _PARISC_CACHEFLUSH_H */
......@@ -84,19 +84,6 @@ static inline void *kmap_atomic(struct page *page)
return kmap_atomic_prot(page, kmap_prot);
}
static inline struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long) ptr;
pte_t *pte;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
idx = virt_to_fix(vaddr);
pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
return pte_page(*pte);
}
#define flush_cache_kmaps() flush_cache_all()
......
......@@ -416,8 +416,9 @@
#define __NR_memfd_create 348
#define __NR_bpf 349
#define __NR_execveat 350
#define __NR_membarrier 351
#define NR_syscalls 351
#define NR_syscalls 352
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
......
......@@ -87,4 +87,4 @@ sys_call_table:
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .long sys_execveat
/*350*/ .long sys_execveat, sys_membarrier
......@@ -88,7 +88,7 @@ sys_call_table32:
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys32_execveat
/*350*/ .word sys32_execveat, sys_membarrier
#endif /* CONFIG_COMPAT */
......@@ -168,4 +168,4 @@ sys_call_table:
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys64_execveat
/*350*/ .word sys64_execveat, sys_membarrier
......@@ -63,7 +63,6 @@ void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void kmap_atomic_fix_kpte(struct page *page, int finished);
......
......@@ -275,15 +275,3 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
return kmap_atomic_prot(pfn_to_page(pfn), prot);
}
struct page *kmap_atomic_to_page(void *ptr)
{
pte_t *pte;
unsigned long vaddr = (unsigned long)ptr;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
pte = kmap_get_pte(vaddr);
return pte_page(*pte);
}
......@@ -68,7 +68,6 @@ void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() do { } while (0)
......
......@@ -104,20 +104,6 @@ void __kunmap_atomic(void *kvaddr)
}
EXPORT_SYMBOL(__kunmap_atomic);
struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long)ptr;
pte_t *pte;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
idx = virt_to_fix(vaddr);
pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);
void __init set_highmem_pages_init(void)
{
struct zone *zone;
......
......@@ -261,7 +261,7 @@ static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
* available. In that case we can't account for this and just
* hope for the best.
*/
if (vblrc && (abs64(diff_ns) > 1000000))
if (vblrc && (abs(diff_ns) > 1000000))
store_vblank(dev, pipe, 1, &tvblank);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
......@@ -1772,7 +1772,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
* e.g., due to spurious vblank interrupts. We need to
* ignore those for accounting.
*/
if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS)
if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS)
store_vblank(dev, pipe, 1, &tvblank);
else
DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n",
......
......@@ -555,11 +555,11 @@ static int tegra_sor_compute_params(struct tegra_sor *sor,
error = div_s64(active_sym - approx, tu_size);
error *= params->num_clocks;
if (error <= 0 && abs64(error) < params->error) {
if (error <= 0 && abs(error) < params->error) {
params->active_count = div_u64(active_count, f);
params->active_polarity = active_polarity;
params->active_frac = active_frac;
params->error = abs64(error);
params->error = abs(error);
params->tu_size = tu_size;
if (error == 0)
......
......@@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data)
if (w->counter == 24) { /* full frame */
walkera0701_parse_frame(w);
w->counter = NO_SYNC;
if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */
if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */
w->counter = 0;
} else {
if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE)
......@@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data)
} else
w->counter = NO_SYNC;
}
} else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) <
} else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) <
RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */
w->counter = 0;
......
......@@ -1133,7 +1133,7 @@ static int __ov965x_set_frame_interval(struct ov965x *ov965x,
if (mbus_fmt->width != iv->size.width ||
mbus_fmt->height != iv->size.height)
continue;
err = abs64((u64)(iv->interval.numerator * 10000) /
err = abs((u64)(iv->interval.numerator * 10000) /
iv->interval.denominator - req_int);
if (err < min_err) {
fiv = iv;
......
......@@ -787,7 +787,7 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
struct mac80211_hwsim_data *data = hw->priv;
u64 now = mac80211_hwsim_get_tsf(hw, vif);
u32 bcn_int = data->beacon_int;
u64 delta = abs64(tsf - now);
u64 delta = abs(tsf - now);
/* adjust after beaconing with new timestamp at old TBTT */
if (tsf > now) {
......
EXTRA_CFLAGS += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb3
ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb3
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o
EXTRA_CFLAGS += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o
......@@ -228,7 +228,7 @@ static u32 pid_controller(struct thermal_zone_device *tz,
if (err < int_to_frac(tz->tzp->integral_cutoff)) {
s64 i_next = i + mul_frac(tz->tzp->k_i, err);
if (abs64(i_next) < max_power_frac) {
if (abs(i_next) < max_power_frac) {
i = i_next;
params->err_integral += err;
}
......
......@@ -589,7 +589,7 @@ static int ehci_run (struct usb_hcd *hcd)
* streaming mappings for I/O buffers, like pci_map_single(),
* can return segments above 4GB, if the device allows.
*
* NOTE: the dma mask is visible through dma_supported(), so
* NOTE: the dma mask is visible through dev->dma_mask, so
* drivers can pass this info along ... like NETIF_F_HIGHDMA,
* Scsi_Host.highmem_io, and so forth. It's readonly to all
* host side drivers though.
......
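(The comment above — updated identically in fotg210 and oxu below — now points drivers at dev->dma_mask instead of dma_supported(). As an illustration only, a driver could base a NETIF_F_HIGHDMA-style decision on the configured mask via dma_get_mask(); the helper name here is hypothetical:)

```c
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

/* Illustrative only: read the mask the host controller configured. */
static void example_note_highdma(struct net_device *ndev, struct device *dmadev)
{
	if (dma_get_mask(dmadev) > DMA_BIT_MASK(32))
		ndev->features |= NETIF_F_HIGHDMA;
}
```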
......@@ -5063,7 +5063,7 @@ static int fotg210_run(struct usb_hcd *hcd)
* streaming mappings for I/O buffers, like pci_map_single(),
* can return segments above 4GB, if the device allows.
*
* NOTE: the dma mask is visible through dma_supported(), so
* NOTE: the dma mask is visible through dev->dma_mask, so
* drivers can pass this info along ... like NETIF_F_HIGHDMA,
* Scsi_Host.highmem_io, and so forth. It's readonly to all
* host side drivers though.
......
......@@ -2721,7 +2721,7 @@ static int oxu_run(struct usb_hcd *hcd)
* streaming mappings for I/O buffers, like pci_map_single(),
* can return segments above 4GB, if the device allows.
*
* NOTE: the dma mask is visible through dma_supported(), so
* NOTE: the dma mask is visible through dev->dma_mask, so
* drivers can pass this info along ... like NETIF_F_HIGHDMA,
* Scsi_Host.highmem_io, and so forth. It's readonly to all
* host side drivers though.
......
......@@ -1368,9 +1368,6 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rde
dir->i_ino, dentry, mode,
MAJOR(rdev), MINOR(rdev));
if (!new_valid_dev(rdev))
return -EINVAL;
/* build extension */
if (S_ISBLK(mode))
sprintf(name, "b %u %u", MAJOR(rdev), MINOR(rdev));
......
......@@ -829,9 +829,6 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
dir->i_ino, dentry, omode,
MAJOR(rdev), MINOR(rdev));
if (!new_valid_dev(rdev))
return -EINVAL;
v9ses = v9fs_inode2v9ses(dir);
dir_dentry = dentry->d_parent;
dfid = v9fs_fid_lookup(dir_dentry);
......
......@@ -103,19 +103,36 @@ static void __exit exit_elf_fdpic_binfmt(void)
core_initcall(init_elf_fdpic_binfmt);
module_exit(exit_elf_fdpic_binfmt);
static int is_elf_fdpic(struct elfhdr *hdr, struct file *file)
static int is_elf(struct elfhdr *hdr, struct file *file)
{
if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0)
return 0;
if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)
return 0;
if (!elf_check_arch(hdr) || !elf_check_fdpic(hdr))
if (!elf_check_arch(hdr))
return 0;
if (!file->f_op->mmap)
return 0;
return 1;
}
#ifndef elf_check_fdpic
#define elf_check_fdpic(x) 0
#endif
#ifndef elf_check_const_displacement
#define elf_check_const_displacement(x) 0
#endif
static int is_constdisp(struct elfhdr *hdr)
{
if (!elf_check_fdpic(hdr))
return 1;
if (elf_check_const_displacement(hdr))
return 1;
return 0;
}
/*****************************************************************************/
/*
* read the program headers table into memory
......@@ -191,8 +208,18 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
/* check that this is a binary we know how to deal with */
retval = -ENOEXEC;
if (!is_elf_fdpic(&exec_params.hdr, bprm->file))
if (!is_elf(&exec_params.hdr, bprm->file))
goto error;
if (!elf_check_fdpic(&exec_params.hdr)) {
#ifdef CONFIG_MMU
/* binfmt_elf handles non-fdpic elf except on nommu */
goto error;
#else
/* nommu can only load ET_DYN (PIE) ELF */
if (exec_params.hdr.e_type != ET_DYN)
goto error;
#endif
}
/* read the program header table */
retval = elf_fdpic_fetch_phdrs(&exec_params, bprm->file);
......@@ -269,13 +296,13 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
}
if (elf_check_const_displacement(&exec_params.hdr))
if (is_constdisp(&exec_params.hdr))
exec_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;
/* perform insanity checks on the interpreter */
if (interpreter_name) {
retval = -ELIBBAD;
if (!is_elf_fdpic(&interp_params.hdr, interpreter))
if (!is_elf(&interp_params.hdr, interpreter))
goto error;
interp_params.flags = ELF_FDPIC_FLAG_PRESENT;
......@@ -306,9 +333,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
retval = -ENOEXEC;
if (stack_size == 0)
goto error;
stack_size = 131072UL; /* same as exec.c's default commit */
if (elf_check_const_displacement(&interp_params.hdr))
if (is_constdisp(&interp_params.hdr))
interp_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;
/* flush all traces of the currently running executable */
......@@ -319,7 +346,10 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
/* there's now no turning back... the old userspace image is dead,
* defunct, deceased, etc.
*/
set_personality(PER_LINUX_FDPIC);
if (elf_check_fdpic(&exec_params.hdr))
set_personality(PER_LINUX_FDPIC);
else
set_personality(PER_LINUX);
if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
......
......@@ -6360,9 +6360,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
u64 objectid;
u64 index = 0;
if (!new_valid_dev(rdev))
return -EINVAL;
/*
* 2 for inode item and ref
* 2 for dir items
......
......@@ -80,9 +80,6 @@ static int exofs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
struct inode *inode;
int err;
if (!new_valid_dev(rdev))
return -EINVAL;
inode = exofs_new_inode(dir, mode);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
......
......@@ -143,9 +143,6 @@ static int ext2_mknod (struct inode * dir, struct dentry *dentry, umode_t mode,
struct inode * inode;
int err;
if (!new_valid_dev(rdev))
return -EINVAL;
err = dquot_initialize(dir);
if (err)
return err;
......
......@@ -3333,8 +3333,8 @@ ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
atomic_inc(&pa->pa_count);
return pa;
}
cur_distance = abs64(goal_block - cpa->pa_pstart);
new_distance = abs64(goal_block - pa->pa_pstart);
cur_distance = abs(goal_block - cpa->pa_pstart);
new_distance = abs(goal_block - pa->pa_pstart);
if (cur_distance <= new_distance)
return cpa;
......
......@@ -481,9 +481,6 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
struct inode *inode;
int err = 0;
if (!new_valid_dev(rdev))
return -EINVAL;
f2fs_balance_fs(sbi);
inode = f2fs_new_inode(dir, mode);
......
......@@ -1981,9 +1981,9 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
* page->mapping->host, so the page-dirtying time is recorded in the internal
* blockdev inode.
*/
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
void __mark_inode_dirty(struct inode *inode, int flags)
{
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
struct super_block *sb = inode->i_sb;
int dirtytime;
......@@ -2093,6 +2093,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
out_unlock_inode:
spin_unlock(&inode->i_lock);
#undef I_DIRTY_INODE
}
EXPORT_SYMBOL(__mark_inode_dirty);
......
......@@ -50,7 +50,7 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
s64 delta = sample - s->stats[index];
s->stats[index] += (delta >> 3);
index++;
s->stats[index] += ((abs64(delta) - s->stats[index]) >> 2);
s->stats[index] += ((abs(delta) - s->stats[index]) >> 2);
}
/**
......
......@@ -227,8 +227,6 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, de
int err;
if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
if (hpfs_sb(dir->i_sb)->sb_eas < 2) return -EPERM;
if (!new_valid_dev(rdev))
return -EINVAL;
hpfs_lock(dir->i_sb);
err = -ENOSPC;
fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
......
......@@ -1597,6 +1597,7 @@ static int update_time(struct inode *inode, struct timespec *time, int flags)
/**
* touch_atime - update the access time
* @path: the &struct path to update
* @inode: inode to update
*
* Update the accessed time on an inode and mark it for writeback.
* This function automatically handles read only file systems and media,
......
......@@ -1372,9 +1372,6 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
tid_t tid;
struct tblock *tblk;
if (!new_valid_dev(rdev))
return -EINVAL;
jfs_info("jfs_mknod: %pd", dentry);
rc = dquot_initialize(dir);
......
......@@ -496,9 +496,6 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);
if (!new_valid_dev(sb->s_bdev->bd_dev))
return -EOVERFLOW;
sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
......
......@@ -1165,8 +1165,6 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
static int ncp_mknod(struct inode * dir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
if (!new_valid_dev(rdev))
return -EINVAL;
if (ncp_is_nfs_extras(NCP_SERVER(dir), NCP_FINFO(dir)->volNumber)) {
ncp_dbg(1, "mode = 0%ho\n", mode);
return ncp_create_new(dir, dentry, mode, rdev, 0);
......
......@@ -120,9 +120,6 @@ nilfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
struct nilfs_transaction_info ti;
int err;
if (!new_valid_dev(rdev))
return -EINVAL;
err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
if (err)
return err;
......
......@@ -712,9 +712,6 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb));
if (!new_valid_dev(rdev))
return -EINVAL;
retval = dquot_initialize(dir);
if (retval)
return retval;
......
......@@ -367,8 +367,6 @@ static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
/* mips has weird padding, so we don't get 64 bits there */
if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
return -EOVERFLOW;
tmp.st_dev = new_encode_dev(stat->dev);
tmp.st_rdev = new_encode_dev(stat->rdev);
#else
......
......@@ -78,7 +78,6 @@ static inline void __kunmap_atomic(void *addr)
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#define kmap_flush_unused() do {} while(0)
#endif
......
......@@ -20,7 +20,7 @@
})
/* acceptable for old filesystems */
static inline int old_valid_dev(dev_t dev)
static inline bool old_valid_dev(dev_t dev)
{
return MAJOR(dev) < 256 && MINOR(dev) < 256;
}
......@@ -35,7 +35,7 @@ static inline dev_t old_decode_dev(u16 val)
return MKDEV((val >> 8) & 255, val & 255);
}
static inline int new_valid_dev(dev_t dev)
static inline bool new_valid_dev(dev_t dev)
{
return 1;
}
......@@ -54,11 +54,6 @@ static inline dev_t new_decode_dev(u32 dev)
return MKDEV(major, minor);
}
static inline int huge_valid_dev(dev_t dev)
{
return 1;
}
static inline u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
......
......@@ -200,28 +200,28 @@ extern int _cond_resched(void);
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
/*
* abs() handles unsigned and signed longs, ints, shorts and chars. For all
* input types abs() returns a signed long.
* abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
* for those.
/**
* abs - return absolute value of an argument
* @x: the value. If it is unsigned type, it is converted to signed type first
* (s64, long or int depending on its size).
*
* Return: an absolute value of x. If x is 64-bit, macro's return type is s64,
* otherwise it is signed long.
*/
#define abs(x) ({ \
long ret; \
if (sizeof(x) == sizeof(long)) { \
long __x = (x); \
ret = (__x < 0) ? -__x : __x; \
} else { \
int __x = (x); \
ret = (__x < 0) ? -__x : __x; \
} \
ret; \
})
#define abs64(x) ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
})
#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
}), ({ \
long ret; \
if (sizeof(x) == sizeof(long)) { \
long __x = (x); \
ret = (__x < 0) ? -__x : __x; \
} else { \
int __x = (x); \
ret = (__x < 0) ? -__x : __x; \
} \
ret; \
}))
/**
* reciprocal_scale - "scale" a value into range [0, ep_ro)
......
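(With the reworked macro above, abs() itself handles 64-bit arguments — returning s64 for them and long otherwise — which is why every abs64() call site in this series becomes a plain abs(). A small usage sketch assuming that macro; the function and variable names are illustrative:)

```c
#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only: both calls go through the same abs() macro now. */
static void example_abs(s64 tick_error, int diff)
{
	s64 big  = abs(tick_error); /* 64-bit branch, result type s64  */
	long sml = abs(diff);       /* int/long branch, result is long */

	(void)big;
	(void)sml;
}
```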
......@@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data)
continue;
/* Check the deviation from the watchdog clocksource. */
if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
cs->name);
pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
......
......@@ -1614,7 +1614,7 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
negative = (tick_error < 0);
/* Sort out the magnitude of the correction */
tick_error = abs64(tick_error);
tick_error = abs(tick_error);
for (adj = 0; tick_error > interval; adj++)
tick_error >>= 1;
......
......@@ -162,7 +162,7 @@ s64 div64_s64(s64 dividend, s64 divisor)
{
s64 quot, t;
quot = div64_u64(abs64(dividend), abs64(divisor));
quot = div64_u64(abs(dividend), abs(divisor));
t = (dividend ^ divisor) >> 63;
return (quot ^ t) - t;
......
......@@ -331,7 +331,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
* 1/8, rto_alpha would be expressed as 3.
*/
tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
+ (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
+ (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
+ (rtt >> net->sctp.rto_alpha);
} else {
......