Commit 5037be16 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:

 - A rework for the s390 arch random code, the TRNG instruction is
   rather slow and should not be used on the interrupt path

 - A fix for a memory leak in the zcrypt driver

 - Changes to the early boot code to add a compile time check for code
   that may not use the .bss section, with the goal of avoiding initrd
   corruption

 - Add an interface to get the physical network ID (pnetid); this is
   useful to group network devices that are attached to the same network

 - Some cleanup for the linker script

 - Some code improvement for the dasd driver

 - Two fixes for the perf sampling support

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/zcrypt: Fix CCA and EP11 CPRB processing failure memory leak.
  s390/archrandom: Rework arch random implementation.
  s390/net: add pnetid support
  s390/dasd: simplify locking in dasd_times_out
  s390/cio: add test for ccwgroup device
  s390/cio: add helper to query utility strings per given ccw device
  s390: remove no-op macro VMLINUX_SYMBOL()
  s390: remove closing punctuation from spectre messages
  s390: introduce compile time check for empty .bss section
  s390/early: move functions which may not access bss section to extra file
  s390/early: get rid of #ifdef CONFIG_BLK_DEV_INITRD
  s390/early: get rid of memmove_early
  s390/cpum_sf: Add data entry sizes to sampling trailer entry
  perf: fix invalid bit in diagnostic entry
......@@ -838,6 +838,10 @@ config CCW
source "drivers/Kconfig"
config HAVE_PNETID
	tristate
	default (SMC || CCWGROUP)
source "fs/Kconfig"
source "arch/s390/Kconfig.debug"
......
......@@ -2,14 +2,37 @@
/*
* s390 arch random implementation.
*
- * Copyright IBM Corp. 2017
- * Author(s): Harald Freudenberger <freude@de.ibm.com>
+ * Copyright IBM Corp. 2017, 2018
+ * Author(s): Harald Freudenberger
*
 * The s390_arch_random_generate() function may be called from random.c
 * in interrupt context. So this implementation tries to be as fast as
 * possible. There is a buffer of random data which is asynchronously
 * checked and refilled by a workqueue thread.
 * If there are enough bytes in the buffer, s390_arch_random_generate()
 * just delivers these bytes. Otherwise false is returned until the
 * worker thread has refilled the buffer.
 * The worker fills the rng buffer by pulling fresh entropy from the
 * high quality (but slow) true hardware random generator. This entropy
 * is then spread over the buffer with a pseudo random number generator
 * (PRNG).
 * As arch_get_random_seed_long() fetches 8 bytes and the calling
 * function add_interrupt_randomness() counts this as 1 bit of entropy,
 * the distribution needs to make sure that 8 bytes of the buffer do in
 * fact contain 1 bit of entropy. The current values pull 32 bytes of
 * entropy and scatter them over a 2048 byte buffer, so 8 bytes of the
 * buffer contain 1 bit of entropy.
 * The worker thread is rescheduled based on the fill level of the
 * buffer, but with at least a 500 ms delay to avoid excessive CPU
 * consumption. So the maximum amount of rng data delivered via
 * arch_get_random_seed is limited to 4 KB per second.
 */
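/*
 * Worked numbers for the accounting above (added for illustration, not
 * part of the original source): 32 bytes of TRNG seed carry 256 bits of
 * entropy. Spread over a 2048 byte buffer this is 256/2048 = 1/8 bit
 * per byte, so one 8 byte arch_get_random_seed_long() read carries
 * exactly the 1 bit that add_interrupt_randomness() credits for it.
 */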
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <asm/cpacf.h>
DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
......@@ -17,11 +40,83 @@ DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
EXPORT_SYMBOL(s390_arch_random_counter);
#define ARCH_REFILL_TICKS (HZ/2)
#define ARCH_PRNG_SEED_SIZE 32
#define ARCH_RNG_BUF_SIZE 2048
static DEFINE_SPINLOCK(arch_rng_lock);
static u8 *arch_rng_buf;
static unsigned int arch_rng_buf_idx;
static void arch_rng_refill_buffer(struct work_struct *);
static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
{
	/* lock rng buffer */
	if (!spin_trylock(&arch_rng_lock))
		return false;

	/* try to resolve the requested amount of bytes from the buffer */
	arch_rng_buf_idx -= nbytes;
	if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
		memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
		atomic64_add(nbytes, &s390_arch_random_counter);
		spin_unlock(&arch_rng_lock);
		return true;
	}

	/* not enough bytes in rng buffer, refill is done asynchronously */
	spin_unlock(&arch_rng_lock);
	return false;
}
EXPORT_SYMBOL(s390_arch_random_generate);
static void arch_rng_refill_buffer(struct work_struct *unused)
{
	unsigned int delay = ARCH_REFILL_TICKS;

	spin_lock(&arch_rng_lock);
	if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
		/* buffer is exhausted and needs refill */
		u8 seed[ARCH_PRNG_SEED_SIZE];
		u8 prng_wa[240];

		/* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
		cpacf_trng(NULL, 0, seed, sizeof(seed));
		/* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
		memset(prng_wa, 0, sizeof(prng_wa));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
			   &prng_wa, NULL, 0, seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
			   &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0);
		arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
	}
	delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE;
	spin_unlock(&arch_rng_lock);

	/* kick next check */
	queue_delayed_work(system_long_wq, &arch_rng_work, delay);
}
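/*
 * Illustrative delay values, assuming HZ = 100 (not from the original
 * source): after a refill the buffer is full (idx == 2048), so the next
 * check runs after HZ/2 + HZ/2 ticks, i.e. one second. A nearly drained
 * buffer is rechecked after close to the minimum HZ/2 ticks (500 ms).
 * One run refills at most 2048 bytes and runs are at least 500 ms
 * apart, which gives the 4 KB per second ceiling from the header
 * comment above.
 */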
static int __init s390_arch_random_init(void)
{
-	/* check if subfunction CPACF_PRNO_TRNG is available */
-	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+	/* all the needed PRNO subfunctions available ? */
+	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
+	    cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
+		/* alloc arch random working buffer */
+		arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
+		if (!arch_rng_buf)
+			return -ENOMEM;
+		/* kick worker queue job to fill the random buffer */
+		queue_delayed_work(system_long_wq,
+				   &arch_rng_work, ARCH_REFILL_TICKS);
+		/* enable arch random to the outside world */
		static_branch_enable(&s390_arch_random_available);
+	}
	return 0;
}
......
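A detail of s390_arch_random_generate() worth spelling out: the unconditional `arch_rng_buf_idx -= nbytes` is allowed to underflow. The wrapped value fails the `< ARCH_RNG_BUF_SIZE` range check, and the refill worker later recognizes the exhausted buffer by exactly this condition (`arch_rng_buf_idx > ARCH_RNG_BUF_SIZE`). A minimal userspace model of the idiom, with assumed names (a sketch, not kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 2048u

static unsigned char buf[BUF_SIZE];
static unsigned int idx = BUF_SIZE;	/* bytes still available */

static bool take(unsigned char *out, unsigned int nbytes)
{
	/* Intentional unsigned underflow: with fewer than nbytes left,
	 * idx wraps to a huge value and the range check fails. */
	idx -= nbytes;
	if (idx < BUF_SIZE) {
		memcpy(out, buf + idx, nbytes);
		return true;
	}
	return false;	/* the refill side now sees idx > BUF_SIZE */
}

int main(void)
{
	unsigned char out[8];
	unsigned int reads = 0;

	while (take(out, sizeof(out)))
		reads++;
	printf("%u reads of 8 bytes served before refill\n", reads); /* 256 */
	return 0;
}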
......@@ -15,16 +15,11 @@
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <asm/cpacf.h>
DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
extern atomic64_t s390_arch_random_counter;
-static void s390_arch_random_generate(u8 *buf, unsigned int nbytes)
-{
-	cpacf_trng(NULL, 0, buf, nbytes);
-	atomic64_add(nbytes, &s390_arch_random_counter);
-}
+bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
static inline bool arch_has_random(void)
{
......@@ -51,8 +46,7 @@ static inline bool arch_get_random_int(unsigned int *v)
static inline bool arch_get_random_seed_long(unsigned long *v)
{
	if (static_branch_likely(&s390_arch_random_available)) {
-		s390_arch_random_generate((u8 *)v, sizeof(*v));
-		return true;
+		return s390_arch_random_generate((u8 *)v, sizeof(*v));
	}
	return false;
}
......@@ -60,8 +54,7 @@ static inline bool arch_get_random_seed_long(unsigned long *v)
static inline bool arch_get_random_seed_int(unsigned int *v)
{
	if (static_branch_likely(&s390_arch_random_available)) {
-		s390_arch_random_generate((u8 *)v, sizeof(*v));
-		return true;
+		return s390_arch_random_generate((u8 *)v, sizeof(*v));
	}
	return false;
}
......
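The header change above alters the contract of arch_get_random_seed_long()/_int(): instead of always succeeding (and potentially running the slow TRNG instruction in interrupt context), they now return false whenever the buffer is empty. A hedged userspace model of the new caller-visible behavior (all names here are stand-ins, not from the tree):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for s390_arch_random_generate(); pretend the buffer is empty. */
static bool model_generate(unsigned char *p, unsigned int n)
{
	(void)p; (void)n;
	return false;
}

static bool model_get_random_seed_long(unsigned long *v)
{
	/* As in the reworked header: success now depends on the buffer
	 * state instead of being unconditionally true. */
	return model_generate((unsigned char *)v, sizeof(*v));
}

int main(void)
{
	unsigned long seed;

	/* Callers such as add_interrupt_randomness() simply credit no
	 * entropy on failure; there is no blocking and no retry loop. */
	if (!model_get_random_seed_long(&seed))
		printf("no seed this time, try again later\n");
	return 0;
}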
......@@ -231,4 +231,5 @@ int ccw_device_siosl(struct ccw_device *);
extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *, int);
u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx);
#endif /* _S390_CCWDEV_H_ */
......@@ -73,4 +73,14 @@ extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
#define to_ccwgroupdev(x) container_of((x), struct ccwgroup_device, dev)
#define to_ccwgroupdrv(x) container_of((x), struct ccwgroup_driver, driver)
#if IS_ENABLED(CONFIG_CCWGROUP)
bool dev_is_ccwgroup(struct device *dev);
#else /* CONFIG_CCWGROUP */
static inline bool dev_is_ccwgroup(struct device *dev)
{
	return false;
}
#endif /* CONFIG_CCWGROUP */
#endif
......@@ -113,7 +113,7 @@ struct hws_basic_entry {
struct hws_diag_entry {
	unsigned int def:16;	/* 0-15  Data Entry Format */
-	unsigned int R:14;	/* 16-19 and 20-30 reserved */
+	unsigned int R:15;	/* 16-19 and 20-30 reserved */
	unsigned int I:1;	/* 31 entry valid or invalid */
	u8	data[];		/* Machine-dependent sample data */
} __packed;
......@@ -129,7 +129,9 @@ struct hws_trailer_entry {
			unsigned int f:1;	/* 0 - Block Full Indicator */
			unsigned int a:1;	/* 1 - Alert request control */
			unsigned int t:1;	/* 2 - Timestamp format */
-			unsigned long long:61;	/* 3 - 63: Reserved */
+			unsigned int :29;	/* 3 - 31: Reserved */
+			unsigned int bsdes:16;	/* 32-47: size of basic SDE */
+			unsigned int dsdes:16;	/* 48-63: size of diagnostic SDE */
		};
		unsigned long long flags;	/* 0 - 63: All indicators */
	};
......
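Because the new bsdes/dsdes fields sit in bits 32-47 and 48-63 of the trailer's first 64-bit word (s390 numbers bit 0 as the most significant bit), a consumer that only has the aggregate flags value can recover them with plain shifts. A small standalone sketch with hypothetical sizes (not from the source):

#include <stdint.h>
#include <stdio.h>

/* Bits 32-47 of the MSB-0 numbered word are value bits 31..16. */
static unsigned int trailer_bsdes(uint64_t flags)
{
	return (flags >> 16) & 0xffff;
}

/* Bits 48-63 are the lowest 16 value bits. */
static unsigned int trailer_dsdes(uint64_t flags)
{
	return flags & 0xffff;
}

int main(void)
{
	/* Hypothetical entry sizes, packed the way the union overlays them. */
	uint64_t flags = ((uint64_t)32 << 16) | 128;

	printf("bsdes=%u dsdes=%u\n", trailer_bsdes(flags), trailer_dsdes(flags));
	return 0;
}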
/* SPDX-License-Identifier: GPL-2.0 */
/*
* IBM System z PNET ID Support
*
* Copyright IBM Corp. 2018
*/
#ifndef _ASM_S390_PNET_H
#define _ASM_S390_PNET_H
#include <linux/device.h>
#include <linux/types.h>
#define PNETIDS_LEN 64 /* Total utility string length in bytes
* to cover up to 4 PNETIDs of 16 bytes
* for up to 4 device ports
*/
#define MAX_PNETID_LEN 16	/* Max. length of a single port PNETID */
#define MAX_PNETID_PORTS (PNETIDS_LEN / MAX_PNETID_LEN)
/* Max. # of ports with a PNETID */
int pnet_id_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid);
#endif /* _ASM_S390_PNET_H */
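A hypothetical in-kernel consumer of this interface might use it to test whether two ports share a broadcast domain; a sketch against the header above (the helper and its context are assumptions, and note that the returned bytes come from the channel-path utility string, so they are EBCDIC encoded):

#include <linux/device.h>
#include <linux/string.h>
#include <asm/pnet.h>

/* Hypothetical helper: do two device ports sit in the same physical net? */
static bool same_pnet(struct device *a, unsigned short port_a,
		      struct device *b, unsigned short port_b)
{
	u8 id_a[MAX_PNETID_LEN], id_b[MAX_PNETID_LEN];

	if (pnet_id_by_dev_port(a, port_a, id_a))	/* -ENOENT: no PNETID */
		return false;
	if (pnet_id_by_dev_port(b, port_b, id_b))
		return false;
	return !memcmp(id_a, id_b, MAX_PNETID_LEN);
}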
......@@ -6,22 +6,26 @@
ifdef CONFIG_FUNCTION_TRACER
# Do not trace tracer code
-CFLAGS_REMOVE_ftrace.o	= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_ftrace.o		= $(CC_FLAGS_FTRACE)

# Do not trace early setup code
-CFLAGS_REMOVE_als.o	= $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_early.o	= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_als.o		= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_early.o		= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_early_nobss.o	= $(CC_FLAGS_FTRACE)
endif
-GCOV_PROFILE_als.o	:= n
-GCOV_PROFILE_early.o	:= n
+GCOV_PROFILE_als.o		:= n
+GCOV_PROFILE_early.o		:= n
+GCOV_PROFILE_early_nobss.o	:= n

-KCOV_INSTRUMENT_als.o	:= n
-KCOV_INSTRUMENT_early.o	:= n
+KCOV_INSTRUMENT_als.o		:= n
+KCOV_INSTRUMENT_early.o		:= n
+KCOV_INSTRUMENT_early_nobss.o	:= n

-UBSAN_SANITIZE_als.o	:= n
-UBSAN_SANITIZE_early.o	:= n
+UBSAN_SANITIZE_als.o		:= n
+UBSAN_SANITIZE_early.o		:= n
+UBSAN_SANITIZE_early_nobss.o	:= n
#
# Use -march=z900 for als.c to be able to print an error
......@@ -57,7 +61,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
-obj-y	+= debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
+obj-y	+= debug.o irq.o ipl.o dis.o diag.o vdso.o als.o early_nobss.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
......@@ -94,3 +98,6 @@ obj-$(CONFIG_TRACEPOINTS) += trace.o
# vdso
obj-y += vdso64/
obj-$(CONFIG_COMPAT) += vdso32/
chkbss := head.o head64.o als.o early_nobss.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss
......@@ -33,32 +33,6 @@
static void __init setup_boot_command_line(void);
/*
* Get the TOD clock running.
*/
static void __init reset_tod_clock(void)
{
	u64 time;

	if (store_tod_clock(&time) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
		disabled_wait(0);

	memset(tod_clock_base, 0, 16);
	*(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}
/*
* Clear bss memory
*/
static noinline __init void clear_bss_section(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}
/*
* Initialize storage key for kernel pages
*/
......@@ -310,57 +284,6 @@ static int __init cad_setup(char *str)
}
early_param("cad", cad_setup);
static __init void memmove_early(void *dst, const void *src, size_t n)
{
	unsigned long addr;
	long incr;
	psw_t old;

	if (!n)
		return;
	incr = 1;
	if (dst > src) {
		incr = -incr;
		dst += n - 1;
		src += n - 1;
	}
	old = S390_lowcore.program_new_psw;
	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		"	larl	%[addr],1f\n"
		"	stg	%[addr],%[psw_pgm_addr]\n"
		"0:	mvc	0(1,%[dst]),0(%[src])\n"
		"	agr	%[dst],%[incr]\n"
		"	agr	%[src],%[incr]\n"
		"	brctg	%[n],0b\n"
		"1:\n"
		: [addr] "=&d" (addr),
		  [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
		  [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
		: [incr] "d" (incr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
}
static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
	/*
	 * Just like in case of IPL from VM reader we make sure there is a
	 * gap of 4MB between end of kernel and start of initrd.
	 * That way we can also be sure that saving an NSS will succeed,
	 * which however only requires different segments.
	 */
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (INITRD_START >= min_initrd_addr)
		return;
	memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
	INITRD_START = min_initrd_addr;
#endif
}
/* Set up boot command line */
static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
......@@ -410,9 +333,6 @@ static void __init setup_boot_command_line(void)
void __init startup_init(void)
{
-	reset_tod_clock();
-	rescue_initrd();
-	clear_bss_section();
	time_early_init();
	init_kernel_storage_key();
	lockdep_off();
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2018
*/
/*
* Early setup functions which may not rely on an initialized bss
* section. The last thing that is supposed to happen here is
* initialization of the bss section.
*/
#include <linux/processor.h>
#include <linux/string.h>
#include <asm/sections.h>
#include <asm/lowcore.h>
#include <asm/setup.h>
#include <asm/timex.h>
#include "entry.h"
static void __init reset_tod_clock(void)
{
	u64 time;

	if (store_tod_clock(&time) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
		disabled_wait(0);

	memset(tod_clock_base, 0, 16);
	*(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}
static void __init rescue_initrd(void)
{
	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);

	/*
	 * Just like in case of IPL from VM reader we make sure there is a
	 * gap of 4MB between end of kernel and start of initrd.
	 * That way we can also be sure that saving an NSS will succeed,
	 * which however only requires different segments.
	 */
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (INITRD_START >= min_initrd_addr)
		return;
	memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
	INITRD_START = min_initrd_addr;
}
static void __init clear_bss_section(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}

void __init startup_init_nobss(void)
{
	reset_tod_clock();
	rescue_initrd();
	clear_bss_section();
}
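The 4 MB rule in rescue_initrd() is plain address arithmetic. A tiny userspace model with hypothetical addresses (illustration only):

#include <stdio.h>

int main(void)
{
	unsigned long kernel_end   = 0x0000000000e00000UL; /* hypothetical _end */
	unsigned long initrd_start = 0x0000000000f00000UL; /* hypothetical */
	unsigned long min_initrd_addr = kernel_end + (4UL << 20);

	/* An initrd closer than 4 MB to the kernel image gets moved up. */
	if (initrd_start < min_initrd_addr)
		printf("move initrd from 0x%lx to 0x%lx\n",
		       initrd_start, min_initrd_addr);
	return 0;
}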
......@@ -58,6 +58,7 @@ void do_notify_resume(struct pt_regs *regs);
void __init init_IRQ(void);
void do_IRQ(struct pt_regs *regs, int irq);
void do_restart(void);
void __init startup_init_nobss(void);
void __init startup_init(void);
void die(struct pt_regs *regs, const char *str);
int setup_profiling_timer(unsigned int multiplier);
......
......@@ -40,8 +40,12 @@ ENTRY(startup_continue)
	stg	%r15,__LC_KERNEL_STACK	# set end of kernel stack
	aghi	%r15,-160
#
-# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
-# and create a kernel NSS if the SAVESYS= parm is defined
+# Early setup functions that may not rely on an initialized bss section,
+# like moving the initrd. Returns with an initialized bss section.
#
	brasl	%r14,startup_init_nobss
#
# Early machine initialization and detection functions.
#
	brasl	%r14,startup_init
	lpswe	.Lentry-.LPG1(13)	# jump to _stext in primary-space,
......
......@@ -36,9 +36,9 @@ early_param("nospec", nospec_setup_early);
static int __init nospec_report(void)
{
	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
-		pr_info("Spectre V2 mitigation: execute trampolines.\n");
+		pr_info("Spectre V2 mitigation: execute trampolines\n");
	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
-		pr_info("Spectre V2 mitigation: limited branch prediction.\n");
+		pr_info("Spectre V2 mitigation: limited branch prediction\n");
	return 0;
}
arch_initcall(nospec_report);
......
......@@ -82,10 +82,10 @@ SECTIONS
	. = ALIGN(PAGE_SIZE);
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
-		VMLINUX_SYMBOL(_sinittext) = . ;
+		_sinittext = .;
		INIT_TEXT
		. = ALIGN(PAGE_SIZE);
-		VMLINUX_SYMBOL(_einittext) = . ;
+		_einittext = .;
	}
/*
......
......@@ -8,3 +8,6 @@ obj-y += mem.o xor.o
lib-$(CONFIG_SMP) += spinlock.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
chkbss := mem.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss
......@@ -3,3 +3,4 @@
# Arch-specific network modules
#
obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
obj-$(CONFIG_HAVE_PNETID) += pnet.o
// SPDX-License-Identifier: GPL-2.0
/*
* IBM System z PNET ID Support
*
* Copyright IBM Corp. 2018
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <asm/ccwgroup.h>
#include <asm/ccwdev.h>
#include <asm/pnet.h>
/*
* Get the PNETIDs from a device.
* s390 hardware supports the definition of a so-called Physical Network
* Identifier (short PNETID) per network device port. These PNETIDs can be
* used to identify network devices that are attached to the same physical
* network (broadcast domain).
*
* The device can be
* - a ccwgroup device with all bundled subchannels having the same PNETID
* - a PCI attached network device
*
* Returns:
* 0: PNETIDs extracted from device.
* -ENOMEM: No memory to extract utility string.
* -EOPNOTSUPP: Device type without utility string support
*/
static int pnet_ids_by_device(struct device *dev, u8 *pnetids)
{
	memset(pnetids, 0, PNETIDS_LEN);
	if (dev_is_ccwgroup(dev)) {
		struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
		u8 *util_str;

		util_str = ccw_device_get_util_str(gdev->cdev[0], 0);
		if (!util_str)
			return -ENOMEM;
		memcpy(pnetids, util_str, PNETIDS_LEN);
		kfree(util_str);
		return 0;
	}
	if (dev_is_pci(dev)) {
		struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

		memcpy(pnetids, zdev->util_str, sizeof(zdev->util_str));
		return 0;
	}
	return -EOPNOTSUPP;
}
/*
* Extract the pnetid for a device port.
*
* Return 0 if a pnetid is found and -ENOENT otherwise.
*/
int pnet_id_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid)
{
	u8 pnetids[MAX_PNETID_PORTS][MAX_PNETID_LEN];
	static const u8 zero[MAX_PNETID_LEN] = { 0 };
	int rc = 0;

	if (!dev || port >= MAX_PNETID_PORTS)
		return -ENOENT;

	if (!pnet_ids_by_device(dev, (u8 *)pnetids) &&
	    memcmp(pnetids[port], zero, MAX_PNETID_LEN))
		memcpy(pnetid, pnetids[port], MAX_PNETID_LEN);
	else
		rc = -ENOENT;

	return rc;
}
EXPORT_SYMBOL_GPL(pnet_id_by_dev_port);
MODULE_DESCRIPTION("pnetid determination from utility strings");
MODULE_LICENSE("GPL");
# SPDX-License-Identifier: GPL-2.0
quiet_cmd_chkbss = CHKBSS $<
define cmd_chkbss
	if ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
		echo "error: $< .bss section is not empty" >&2; exit 1; \
	fi; \
	touch $@;
endef

$(obj)/built-in.a: $(patsubst %, $(obj)/%.chkbss, $(chkbss))

%.o.chkbss: %.o
	$(call cmd,chkbss)
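The awk expression keys off the size column of objdump's section table: in the END block, $3 is the third field of the last line printed, i.e. the .bss size, and awk coerces a string of zeros to the number 0. For a hypothetical object file the checked output looks roughly like this (illustrative, not captured from a real build):

$ objdump -j .bss -w -h early_nobss.o

Sections:
Idx Name  Size      VMA               LMA               File off  Algn  Flags
  4 .bss  00000000  0000000000000000  0000000000000000  00000040  2**3  ALLOC

Any non-zero Size makes awk exit 1, which trips the `!` test and fails the build with the "is not empty" error.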
......@@ -2569,14 +2569,11 @@ EXPORT_SYMBOL(dasd_sleep_on_immediatly);
* Cancellation of a request is an asynchronous operation! The calling
* function has to wait until the request is properly returned via callback.
*/
-int dasd_cancel_req(struct dasd_ccw_req *cqr)
+static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
-	unsigned long flags;
-	int rc;
+	int rc = 0;

-	rc = 0;
-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
......@@ -2596,11 +2593,21 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
	default: /* already finished or clear pending - do nothing */
		break;
	}
-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}
-EXPORT_SYMBOL(dasd_cancel_req);
+
+int dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device = cqr->startdev;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	rc = __dasd_cancel_req(cqr);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	return rc;
+}
+EXPORT_SYMBOL(dasd_cancel_req);
/*
* SECTION: Operations of the dasd_block layer.
......@@ -3084,12 +3091,10 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
cqr->retries = -1;
cqr->intrc = -ETIMEDOUT;
	if (cqr->status >= DASD_CQR_QUEUED) {
-		spin_unlock(get_ccwdev_lock(device->cdev));
-		rc = dasd_cancel_req(cqr);
+		rc = __dasd_cancel_req(cqr);
	} else if (cqr->status == DASD_CQR_FILLED ||
		   cqr->status == DASD_CQR_NEED_ERP) {
		cqr->status = DASD_CQR_TERMINATED;
-		spin_unlock(get_ccwdev_lock(device->cdev));
	} else if (cqr->status == DASD_CQR_IN_ERP) {
		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
......@@ -3104,9 +3109,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
			searchcqr->retries = -1;
			searchcqr->intrc = -ETIMEDOUT;
			if (searchcqr->status >= DASD_CQR_QUEUED) {
-				spin_unlock(get_ccwdev_lock(device->cdev));
-				rc = dasd_cancel_req(searchcqr);
-				spin_lock(get_ccwdev_lock(device->cdev));
+				rc = __dasd_cancel_req(searchcqr);
			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
				searchcqr->status = DASD_CQR_TERMINATED;
......@@ -3120,8 +3123,8 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
			}
			break;
		}
-		spin_unlock(get_ccwdev_lock(device->cdev));
	}
+	spin_unlock(get_ccwdev_lock(device->cdev));
	dasd_schedule_block_bh(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irqrestore(&cqr->dq->lock, flags);
......
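The pattern used in the dasd change is the common kernel split between a locked wrapper and a lock-free worker: dasd_times_out() already holds get_ccwdev_lock(), so it calls __dasd_cancel_req() directly instead of dropping and retaking the lock around dasd_cancel_req(). A generic sketch of the idiom (all names are illustrative, not from dasd.c):

#include <linux/spinlock.h>

struct thing {
	spinlock_t lock;
	int state;
};

/* Does the real work; every caller must already hold t->lock. */
static int __do_cancel(struct thing *t)
{
	if (t->state > 0)
		t->state = 0;
	return 0;
}

/* Public entry point: takes the lock, then delegates. */
static int do_cancel(struct thing *t)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&t->lock, flags);
	rc = __do_cancel(t);
	spin_unlock_irqrestore(&t->lock, flags);
	return rc;
}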
......@@ -54,3 +54,6 @@ obj-$(CONFIG_CRASH_DUMP) += sclp_sdias.o zcore.o
hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
obj-$(CONFIG_HMC_DRV) += hmcdrv.o
chkbss := sclp_early_core.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss
......@@ -561,6 +561,12 @@ static struct bus_type ccwgroup_bus_type = {
.pm = &ccwgroup_pm_ops,
};
bool dev_is_ccwgroup(struct device *dev)
{
	return dev->bus == &ccwgroup_bus_type;
}
EXPORT_SYMBOL(dev_is_ccwgroup);
/**
* ccwgroup_driver_register() - register a ccw group driver
* @cdriver: driver to be registered
......
......@@ -472,6 +472,36 @@ struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *cdev,
return chp_get_chp_desc(chpid);
}
/**
* ccw_device_get_util_str() - return newly allocated utility strings
* @cdev: device to obtain the utility strings for
* @chp_idx: index of the channel path
*
* On success return a newly allocated copy of the utility strings
* associated with the given channel path. Return %NULL on error.
*/
u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *util_str;

	chp_id_init(&chpid);
	chpid.id = sch->schib.pmcw.chpid[chp_idx];
	chp = chpid_to_chp(chpid);

	util_str = kmalloc(sizeof(chp->desc_fmt3.util_str), GFP_KERNEL);
	if (!util_str)
		return NULL;

	mutex_lock(&chp->lock);
	memcpy(util_str, chp->desc_fmt3.util_str, sizeof(chp->desc_fmt3.util_str));
	mutex_unlock(&chp->lock);

	return util_str;
}
/**
* ccw_device_get_id() - obtain a ccw device id
* @cdev: device to obtain the id for
......@@ -682,3 +712,4 @@ EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(ccw_device_get_util_str);
......@@ -198,11 +198,18 @@ struct ap_message {
*/
static inline void ap_init_message(struct ap_message *ap_msg)
{
-	ap_msg->psmid = 0;
-	ap_msg->length = 0;
-	ap_msg->rc = 0;
-	ap_msg->special = 0;
-	ap_msg->receive = NULL;
+	memset(ap_msg, 0, sizeof(*ap_msg));
}
/**
 * ap_release_message() - Release ap_message.
 * Releases all memory used internally within the ap_message struct,
 * currently the message and private fields.
 */
static inline void ap_release_message(struct ap_message *ap_msg)
{
	kzfree(ap_msg->message);
	kzfree(ap_msg->private);
}
#define for_each_ap_card(_ac) \
......
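The leak fix works by a simple ownership contract: the caller zeroes the struct with ap_init_message() before any helper may allocate into it, and calls ap_release_message() on every exit path. Since kzfree() treats NULL as a no-op, releasing a message that was never filled is safe. A sketch of the resulting call pattern (the helper name is hypothetical):

/* Hypothetical helper that may allocate ap_msg->message/->private. */
static long some_get_fc_helper(struct ap_message *ap_msg);

static long use_ap_message(void)
{
	struct ap_message ap_msg;
	long rc;

	ap_init_message(&ap_msg);		/* all fields zeroed */
	rc = some_get_fc_helper(&ap_msg);	/* may allocate, even on failure */
	if (rc)
		goto out;
	/* ... send the message and wait for the reply ... */
out:
	ap_release_message(&ap_msg);		/* kzfree(NULL) is a no-op */
	return rc;
}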
......@@ -371,6 +371,7 @@ long zcrypt_send_cprb(struct ica_xcRB *xcRB)
trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
ap_init_message(&ap_msg);
rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
if (rc)
goto out;
......@@ -425,6 +426,7 @@ long zcrypt_send_cprb(struct ica_xcRB *xcRB)
spin_unlock(&zcrypt_list_lock);
out:
ap_release_message(&ap_msg);
trace_s390_zcrypt_rep(xcRB, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
......@@ -468,6 +470,8 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
ap_init_message(&ap_msg);
target_num = (unsigned short) xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
......@@ -485,7 +489,7 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
		if (copy_from_user(targets, uptr,
				   target_num * sizeof(*targets))) {
			rc = -EFAULT;
-			goto out;
+			goto out_free;
		}
	}
......@@ -542,6 +546,7 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
out_free:
kfree(targets);
out:
ap_release_message(&ap_msg);
trace_s390_zcrypt_rep(xcrb, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
......@@ -559,6 +564,7 @@ static long zcrypt_rng(char *buffer)
trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
ap_init_message(&ap_msg);
rc = get_rng_fc(&ap_msg, &func_code, &domain);
if (rc)
goto out;
......@@ -589,8 +595,10 @@ static long zcrypt_rng(char *buffer)
pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
spin_unlock(&zcrypt_list_lock);
-	if (!pref_zq)
-		return -ENODEV;
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out;
+	}
qid = pref_zq->queue->qid;
rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
......@@ -600,6 +608,7 @@ static long zcrypt_rng(char *buffer)
spin_unlock(&zcrypt_list_lock);
out:
ap_release_message(&ap_msg);
trace_s390_zcrypt_rep(buffer, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
......
......@@ -1084,6 +1084,13 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
return rc;
}
/**
 * Fetch function code from cprb.
 * Extracting the fc requires copying the cprb from userspace, so this
 * function allocates memory. It expects an ap_msg prepared by the caller
 * with ap_init_message(), and the caller must make sure that
 * ap_release_message() is always called, even on failure.
 */
unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
struct ap_message *ap_msg,
unsigned int *func_code, unsigned short **dom)
......@@ -1091,9 +1098,7 @@ unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
	struct response_type resp_type = {
		.type = PCIXCC_RESPONSE_TYPE_XCRB,
	};
-	int rc;

-	ap_init_message(ap_msg);
	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
	if (!ap_msg->message)
		return -ENOMEM;
......@@ -1101,17 +1106,10 @@ unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
				atomic_inc_return(&zcrypt_step);
	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
-	if (!ap_msg->private) {
-		kzfree(ap_msg->message);
+	if (!ap_msg->private)
		return -ENOMEM;
-	}
	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
-	rc = XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
-	if (rc) {
-		kzfree(ap_msg->message);
-		kzfree(ap_msg->private);
-	}
-	return rc;
+	return XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
}
/**
......@@ -1139,11 +1137,16 @@ static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
		/* Signal pending. */
		ap_cancel_message(zq->queue, ap_msg);
-	kzfree(ap_msg->message);
-	kzfree(ap_msg->private);

	return rc;
}
/**
 * Fetch function code from ep11 cprb.
 * Extracting the fc requires copying the ep11 cprb from userspace, so
 * this function allocates memory. It expects an ap_msg prepared by the
 * caller with ap_init_message(), and the caller must make sure that
 * ap_release_message() is always called, even on failure.
 */
unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
struct ap_message *ap_msg,
unsigned int *func_code)
......@@ -1151,9 +1154,7 @@ unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
	struct response_type resp_type = {
		.type = PCIXCC_RESPONSE_TYPE_EP11,
	};
-	int rc;

-	ap_init_message(ap_msg);
	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
	if (!ap_msg->message)
		return -ENOMEM;
......@@ -1161,17 +1162,10 @@ unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
				atomic_inc_return(&zcrypt_step);
	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
-	if (!ap_msg->private) {
-		kzfree(ap_msg->message);
+	if (!ap_msg->private)
		return -ENOMEM;
-	}
	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
-	rc = xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
-	if (rc) {
-		kzfree(ap_msg->message);
-		kzfree(ap_msg->private);
-	}
-	return rc;
+	return xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
}
/**
......@@ -1246,8 +1240,6 @@ static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
		/* Signal pending. */
		ap_cancel_message(zq->queue, ap_msg);
-	kzfree(ap_msg->message);
-	kzfree(ap_msg->private);

	return rc;
}
......@@ -1258,7 +1250,6 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
.type = PCIXCC_RESPONSE_TYPE_XCRB,
};
ap_init_message(ap_msg);
ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
if (!ap_msg->message)
return -ENOMEM;
......@@ -1266,10 +1257,8 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
				atomic_inc_return(&zcrypt_step);
	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
-	if (!ap_msg->private) {
-		kzfree(ap_msg->message);
+	if (!ap_msg->private)
		return -ENOMEM;
-	}
	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));

	rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
......@@ -1313,8 +1302,6 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
		/* Signal pending. */
		ap_cancel_message(zq->queue, ap_msg);
-	kzfree(ap_msg->message);
-	kzfree(ap_msg->private);

	return rc;
}
......