Commit 57ca04ab authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Martin Schwidefsky:
 "Over 95% of the changes in this pull request are related to the zcrypt
  driver. There are five improvements for zcrypt: the ID for the CEX6
  cards is added, workload balancing and multi-domain support are
  introduced, the debug logs are overhauled and a set of tracepoints is
  added.

  Then there are several patches in regard to inline assemblies. One
  compile fix and several missing memory clobbers. As far as we can tell
  the omitted memory clobbers have not caused any breakage.

  A small change to the PCI arch code: the machine can tell us how big
  the function measurement blocks are. The PCI function measurement will
  be disabled for a device if the queried length is larger than the
  allocated size for these blocks.

  And two more patches to correct five printk messages.

  That is it for s390 in regard to the 4.10 merge window. Happy holidays"
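
To illustrate the memory clobber issue the series addresses (a generic sketch, not code from any of the patches below): without a "memory" clobber, GCC may assume an inline asm statement leaves memory untouched and can therefore reuse stale register copies or move loads and stores across it. A hypothetical s390 helper:

/* Hypothetical example, not from the kernel tree.  The instruction
 * stores into *p, so the asm must declare the "memory" clobber;
 * otherwise the compiler may keep using an old value of *p. */
static inline void set_to_one(int *p)
{
	int one = 1;

	asm volatile("st %1,0(%0)"	/* store the value of 'one' into *p */
		     : : "a" (p), "d" (one) : "memory");
}

Several of the string and AP inline assemblies touched below gain exactly this kind of clobber.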

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (23 commits)
  s390/pci: query fmb length
  s390/zcrypt: add missing memory clobber to ap_qci inline assembly
  s390/extmem: add missing memory clobber to dcss_set_subcodes
  s390/nmi: fix inline assembly constraints
  s390/lib: add missing memory barriers to string inline assemblies
  s390/cpumf: fix qsi inline assembly
  s390/setup: reword printk messages
  s390/dasd: fix typos in DASD error messages
  s390: fix compile error with memmove_early() inline assembly
  s390/zcrypt: tracepoint definitions for zcrypt device driver.
  s390/zcrypt: Rework debug feature invocations.
  s390/zcrypt: Improved invalid domain response handling.
  s390/zcrypt: Fix ap_max_domain_id for older machine types
  s390/zcrypt: Correct function bits for CEX2x and CEX3x cards.
  s390/zcrypt: Fixed attrition of AP adapters and domains
  s390/zcrypt: Introduce new zcrypt device status API
  s390/zcrypt: add multi domain support
  s390/zcrypt: Introduce workload balancing
  s390/zcrypt: get rid of ap_poll_requests
  s390/zcrypt: header for the AP inline assmblies
  ...
......@@ -213,18 +213,14 @@ static inline int stcctm5(u64 num, u64 *val)
/* Query sampling information */
static inline int qsi(struct hws_qsi_info_block *info)
{
int cc;
cc = 1;
int cc = 1;
asm volatile(
"0: .insn s,0xb2860000,0(%1)\n"
"0: .insn s,0xb2860000,%1\n"
"1: lhi %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (cc), "+a" (info)
: "m" (*info)
: "cc", "memory");
: "+d" (cc), "+Q" (*info));
return cc ? -EINVAL : 0;
}
......
......@@ -133,6 +133,7 @@ struct zpci_dev {
/* Function measurement block */
struct zpci_fmb *fmb;
u16 fmb_update; /* update interval */
u16 fmb_length;
/* software counters */
atomic64_t allocated_pages;
atomic64_t mapped_pages;
......
......@@ -87,7 +87,8 @@ struct clp_rsp_query_pci {
u16 pchid;
u32 bar[PCI_BAR_COUNT];
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
u32 : 24;
u32 : 16;
u8 fmb_len;
u8 pft; /* pci function type */
u64 sdma; /* start dma as */
u64 edma; /* end dma as */
......
......@@ -62,7 +62,7 @@ static inline void *memchr(const void * s, int c, size_t n)
" jl 1f\n"
" la %0,0\n"
"1:"
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
return (void *) ret;
}
......@@ -74,7 +74,7 @@ static inline void *memscan(void *s, int c, size_t n)
asm volatile(
"0: srst %0,%1\n"
" jo 0b\n"
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
return (void *) ret;
}
......@@ -115,7 +115,7 @@ static inline size_t strlen(const char *s)
asm volatile(
"0: srst %0,%1\n"
" jo 0b"
: "+d" (r0), "+a" (tmp) : : "cc");
: "+d" (r0), "+a" (tmp) : : "cc", "memory");
return r0 - (unsigned long) s;
}
......@@ -128,7 +128,7 @@ static inline size_t strnlen(const char * s, size_t n)
asm volatile(
"0: srst %0,%1\n"
" jo 0b"
: "+a" (end), "+a" (tmp) : "d" (r0) : "cc");
: "+a" (end), "+a" (tmp) : "d" (r0) : "cc", "memory");
return end - s;
}
#else /* IN_ARCH_STRING_C */
......
/*
* Tracepoint definitions for the s390 zcrypt device driver
*
* Copyright IBM Corp. 2016
* Author(s): Harald Freudenberger <freude@de.ibm.com>
*
* Currently there are two tracepoint events defined here.
* An s390_zcrypt_req request event occurs as soon as the request is
* recognized by the zcrypt ioctl function. This event may act as some kind
* of request-processing-starts-now indication.
* As late as possible within the zcrypt ioctl function there occurs the
* s390_zcrypt_rep event which may act as the point in time where the
* request has been processed by the kernel and the result is about to be
* transferred back to userspace.
* The glue which binds together request and reply event is the ptr
* parameter, which is the local buffer address where the request from
* userspace has been stored by the ioctl function.
*
* The main purpose of this zcrypt tracepoint api is to get some data for
* performance measurements together with information about on which card
* and queue the request has been processed. It is not an ffdc interface as
* there is already code in the zcrypt device driver to serve the s390
* debug feature interface.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM s390
#if !defined(_TRACE_S390_ZCRYPT_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_S390_ZCRYPT_H
#include <linux/tracepoint.h>
#define TP_ICARSAMODEXPO 0x0001
#define TP_ICARSACRT 0x0002
#define TB_ZSECSENDCPRB 0x0003
#define TP_ZSENDEP11CPRB 0x0004
#define TP_HWRNGCPRB 0x0005
#define show_zcrypt_tp_type(type) \
__print_symbolic(type, \
{ TP_ICARSAMODEXPO, "ICARSAMODEXPO" }, \
{ TP_ICARSACRT, "ICARSACRT" }, \
{ TB_ZSECSENDCPRB, "ZSECSENDCPRB" }, \
{ TP_ZSENDEP11CPRB, "ZSENDEP11CPRB" }, \
{ TP_HWRNGCPRB, "HWRNGCPRB" })
/**
* trace_s390_zcrypt_req - zcrypt request tracepoint function
* @ptr: Address of the local buffer where the request from userspace
* is stored. Can be used as a unique id to relate together
* request and reply.
* @type: One of the TP_ defines above.
*
* Called when a request from userspace is recognised within the ioctl
* function of the zcrypt device driver and may act as an entry
* timestamp.
*/
TRACE_EVENT(s390_zcrypt_req,
TP_PROTO(void *ptr, u32 type),
TP_ARGS(ptr, type),
TP_STRUCT__entry(
__field(void *, ptr)
__field(u32, type)),
TP_fast_assign(
__entry->ptr = ptr;
__entry->type = type;),
TP_printk("ptr=%p type=%s",
__entry->ptr,
show_zcrypt_tp_type(__entry->type))
);
/**
* trace_s390_zcrypt_rep - zcrypt reply tracepoint function
* @ptr: Address of the local buffer where the request from userspace
* is stored. Can be used as a unique id to match together
* request and reply.
* @fc: Function code.
* @rc: The bare returncode as returned by the device driver ioctl
* function.
* @dev: The adapter nr where this request was actually processed.
* @dom: Domain id of the device where this request was processed.
*
* Called upon recognising the reply from the crypto adapter. This
* message may act as the exit timestamp for the request but also
* carries some info about on which adapter the request was processed
* and the returncode from the device driver.
*/
TRACE_EVENT(s390_zcrypt_rep,
TP_PROTO(void *ptr, u32 fc, u32 rc, u16 dev, u16 dom),
TP_ARGS(ptr, fc, rc, dev, dom),
TP_STRUCT__entry(
__field(void *, ptr)
__field(u32, fc)
__field(u32, rc)
__field(u16, device)
__field(u16, domain)),
TP_fast_assign(
__entry->ptr = ptr;
__entry->fc = fc;
__entry->rc = rc;
__entry->device = dev;
__entry->domain = dom;),
TP_printk("ptr=%p fc=0x%04x rc=%d dev=0x%02hx domain=0x%04hx",
__entry->ptr,
(unsigned int) __entry->fc,
(int) __entry->rc,
(unsigned short) __entry->device,
(unsigned short) __entry->domain)
);
#endif /* _TRACE_S390_ZCRYPT_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH asm/trace
#define TRACE_INCLUDE_FILE zcrypt
#include <trace/define_trace.h>
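
A hedged sketch of how these events are meant to be fired from the driver side (the real call sites sit in the zcrypt ioctl path and are not part of this header; the function name and values below are made up for illustration):

/* Illustration only.  One .c file in the driver defines
 * CREATE_TRACE_POINTS before including this header; every other user
 * just includes it and calls the generated trace_* functions. */
#include <asm/trace/zcrypt.h>

static long zcrypt_req_sketch(void *kbuf)
{
	long rc;

	trace_s390_zcrypt_req(kbuf, TP_ICARSAMODEXPO);	/* request seen */
	rc = 0;		/* ... hand the request to some card/queue ... */
	trace_s390_zcrypt_rep(kbuf, 0x10, rc, 0x00, 0x0006);	/* reply done */
	return rc;
}

Once built in, the two events typically show up under /sys/kernel/debug/tracing/events/s390/.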
......@@ -215,6 +215,42 @@ struct ep11_urb {
uint64_t resp;
} __attribute__((packed));
/**
* struct zcrypt_device_status
* @hwtype: raw hardware type
* @qid: 6 bit device index, 8 bit domain
* @functions: AP device function bit field 'abcdef'
* a, b, c = reserved
* d = CCA coprocessor
* e = Accelerator
* f = EP11 coprocessor
* @online online status
* @reserved reserved
*/
struct zcrypt_device_status {
unsigned int hwtype:8;
unsigned int qid:14;
unsigned int online:1;
unsigned int functions:6;
unsigned int reserved:3;
};
#define MAX_ZDEV_CARDIDS 64
#define MAX_ZDEV_DOMAINS 256
/**
* Maximum number of zcrypt devices
*/
#define MAX_ZDEV_ENTRIES (MAX_ZDEV_CARDIDS * MAX_ZDEV_DOMAINS)
/**
* zcrypt_device_matrix
* Device matrix of all zcrypt devices
*/
struct zcrypt_device_matrix {
struct zcrypt_device_status device[MAX_ZDEV_ENTRIES];
};
#define AUTOSELECT ((unsigned int)0xFFFFFFFF)
#define ZCRYPT_IOCTL_MAGIC 'z'
......@@ -321,6 +357,7 @@ struct ep11_urb {
#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0)
#define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0)
/* New status calls */
#define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
......
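
A hedged userspace sketch of the new device status call, assuming ZDEVICESTATUS fills in a struct zcrypt_device_matrix passed by pointer through the existing /dev/z90crypt node (error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/zcrypt.h>		/* struct zcrypt_device_matrix, ZDEVICESTATUS */

int main(void)
{
	static struct zcrypt_device_matrix matrix;	/* ~64 KiB, so keep it static */
	int i, fd = open("/dev/z90crypt", O_RDWR);

	if (fd < 0 || ioctl(fd, ZDEVICESTATUS, &matrix) != 0)
		return 1;
	for (i = 0; i < MAX_ZDEV_ENTRIES; i++)
		if (matrix.device[i].online)
			printf("card %d domain %d hwtype %d\n",
			       matrix.device[i].qid >> 8,	/* 6 bit card index */
			       matrix.device[i].qid & 0xff,	/* 8 bit domain */
			       matrix.device[i].hwtype);
	close(fd);
	return 0;
}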
......@@ -417,7 +417,7 @@ static __init void memmove_early(void *dst, const void *src, size_t n)
" brctg %[n],0b\n"
"1:\n"
: [addr] "=&d" (addr),
[psw_pgm_addr] "=&Q" (S390_lowcore.program_new_psw.addr),
[psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
[dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
: [incr] "d" (incr)
: "cc", "memory");
......
......@@ -102,7 +102,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
{
int kill_task;
u64 zero;
void *fpt_save_area, *fpt_creg_save_area;
void *fpt_save_area;
kill_task = 0;
zero = 0;
......@@ -130,7 +130,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
kill_task = 1;
}
fpt_save_area = &S390_lowcore.floating_pt_save_area;
fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
if (!mci.fc) {
/*
* Floating point control register can't be restored.
......@@ -142,11 +141,13 @@ static int notrace s390_validate_registers(union mci mci, int umode)
*/
if (S390_lowcore.fpu_flags & KERNEL_FPC)
s390_handle_damage();
asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
asm volatile("lfpc %0" : : "Q" (zero));
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
} else
asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
} else {
asm volatile("lfpc %0"
: : "Q" (S390_lowcore.fpt_creg_save_area));
}
if (!MACHINE_HAS_VX) {
/* Validate floating point registers */
......@@ -167,7 +168,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
" ld 13,104(%0)\n"
" ld 14,112(%0)\n"
" ld 15,120(%0)\n"
: : "a" (fpt_save_area));
: : "a" (fpt_save_area) : "memory");
} else {
/* Validate vector registers */
union ctlreg0 cr0;
......@@ -217,7 +218,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
} else {
asm volatile(
" lctlg 0,15,0(%0)"
: : "a" (&S390_lowcore.cregs_save_area));
: : "a" (&S390_lowcore.cregs_save_area) : "memory");
}
/*
* We don't even try to validate the TOD register, since we simply
......@@ -234,9 +235,9 @@ static int notrace s390_validate_registers(union mci mci, int umode)
: : : "0", "cc");
else
asm volatile(
" l 0,0(%0)\n"
" l 0,%0\n"
" sckpf"
: : "a" (&S390_lowcore.tod_progreg_save_area)
: : "Q" (S390_lowcore.tod_progreg_save_area)
: "0", "cc");
/* Validate clock comparator register */
set_clock_comparator(S390_lowcore.clock_comparator);
......
......@@ -485,7 +485,7 @@ static void __init setup_memory_end(void)
max_pfn = max_low_pfn = PFN_DOWN(memory_end);
memblock_remove(memory_end, ULONG_MAX);
pr_notice("Max memory size: %luMB\n", memory_end >> 20);
pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
}
static void __init setup_vmcoreinfo(void)
......@@ -650,7 +650,7 @@ static void __init check_initrd(void)
#ifdef CONFIG_BLK_DEV_INITRD
if (INITRD_START && INITRD_SIZE &&
!memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
pr_err("initrd does not fit memory.\n");
pr_err("The initial RAM disk does not fit into the memory\n");
memblock_free(INITRD_START, INITRD_SIZE);
initrd_start = initrd_end = 0;
}
......
......@@ -20,7 +20,7 @@ static inline char *__strend(const char *s)
asm volatile ("0: srst %0,%1\n"
" jo 0b"
: "+d" (r0), "+a" (s) : : "cc" );
: "+d" (r0), "+a" (s) : : "cc", "memory");
return (char *) r0;
}
......@@ -31,7 +31,7 @@ static inline char *__strnend(const char *s, size_t n)
asm volatile ("0: srst %0,%1\n"
" jo 0b"
: "+d" (p), "+a" (s) : "d" (r0) : "cc" );
: "+d" (p), "+a" (s) : "d" (r0) : "cc", "memory");
return (char *) p;
}
......@@ -213,7 +213,7 @@ int strcmp(const char *cs, const char *ct)
" sr %0,%1\n"
"1:"
: "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
: : "cc" );
: : "cc", "memory");
return ret;
}
EXPORT_SYMBOL(strcmp);
......@@ -250,7 +250,7 @@ static inline int clcle(const char *s1, unsigned long l1,
" ipm %0\n"
" srl %0,28"
: "=&d" (cc), "+a" (r2), "+a" (r3),
"+a" (r4), "+a" (r5) : : "cc");
"+a" (r4), "+a" (r5) : : "cc", "memory");
return cc;
}
......@@ -298,7 +298,7 @@ void *memchr(const void *s, int c, size_t n)
" jl 1f\n"
" la %0,0\n"
"1:"
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
return (void *) ret;
}
EXPORT_SYMBOL(memchr);
......@@ -336,7 +336,7 @@ void *memscan(void *s, int c, size_t n)
asm volatile ("0: srst %0,%1\n"
" jo 0b\n"
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
return (void *) ret;
}
EXPORT_SYMBOL(memscan);
......@@ -122,7 +122,7 @@ dcss_set_subcodes(void)
"1: la %2,3\n"
"2:\n"
EX_TABLE(0b, 1b)
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc", "memory");
kfree(name);
/* Diag x'64' new subcodes are supported, set to new subcodes */
......
......@@ -180,7 +180,7 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
struct mod_pci_args args = { 0, 0, 0, 0 };
if (zdev->fmb)
if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
return -EINVAL;
zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
......
......@@ -148,6 +148,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
zdev->pft = response->pft;
zdev->vfn = response->vfn;
zdev->uid = response->uid;
zdev->fmb_length = sizeof(u32) * response->fmb_len;
memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
if (response->util_str_avail) {
......
......@@ -674,7 +674,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
break;
case 0x0D:
dev_warn(&device->cdev->dev,
"FORMAT 4 - No syn byte in count "
"FORMAT 4 - No sync byte in count "
"address area; offset active\n");
break;
case 0x0E:
......@@ -684,7 +684,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
break;
case 0x0F:
dev_warn(&device->cdev->dev,
"FORMAT 4 - No syn byte in data area; "
"FORMAT 4 - No sync byte in data area; "
"offset active\n");
break;
default:
......@@ -999,7 +999,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT D - Reserved\n");
"FORMAT F - Reserved\n");
}
break;
......
......@@ -2,10 +2,11 @@
# S/390 crypto devices
#
ap-objs := ap_bus.o
# zcrypt_api depends on ap
obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o
# msgtype* depend on zcrypt_api
obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
# adapter drivers depend on ap, zcrypt_api and msgtype*
ap-objs := ap_bus.o ap_card.o ap_queue.o
obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* Adjunct processor bus inline assemblies.
*/
#ifndef _AP_ASM_H_
#define _AP_ASM_H_
#include <asm/isc.h>
/**
* ap_instructions_available() - Test if AP instructions are available.
*
* Returns 0 if the AP instructions are installed.
*/
static inline int ap_instructions_available(void)
{
register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
register unsigned long reg1 asm ("1") = -ENODEV;
register unsigned long reg2 asm ("2") = 0UL;
asm volatile(
" .long 0xb2af0000\n" /* PQAP(TAPQ) */
"0: la %1,0\n"
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
return reg1;
}
/**
* ap_tapq(): Test adjunct processor queue.
* @qid: The AP queue number
* @info: Pointer to queue descriptor
*
* Returns AP queue status structure.
*/
static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
{
register unsigned long reg0 asm ("0") = qid;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm ("2") = 0UL;
asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
if (info)
*info = reg2;
return reg1;
}
/**
* ap_pqap_rapq(): Reset adjunct processor queue.
* @qid: The AP queue number
*
* Returns AP queue status structure.
*/
static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
{
register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm ("2") = 0UL;
asm volatile(
".long 0xb2af0000" /* PQAP(RAPQ) */
: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
return reg1;
}
/**
* ap_aqic(): Enable interruption for a specific AP.
* @qid: The AP queue number
* @ind: The notification indicator byte
*
* Returns AP queue status.
*/
static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind)
{
register unsigned long reg0 asm ("0") = qid | (3UL << 24);
register unsigned long reg1_in asm ("1") = (8UL << 44) | AP_ISC;
register struct ap_queue_status reg1_out asm ("1");
register void *reg2 asm ("2") = ind;
asm volatile(
".long 0xb2af0000" /* PQAP(AQIC) */
: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
:
: "cc");
return reg1_out;
}
/**
* ap_qci(): Get AP configuration data
*
* Returns 0 on success, or -EOPNOTSUPP.
*/
static inline int ap_qci(void *config)
{
register unsigned long reg0 asm ("0") = 0x04000000UL;
register unsigned long reg1 asm ("1") = -EINVAL;
register void *reg2 asm ("2") = (void *) config;
asm volatile(
".long 0xb2af0000\n" /* PQAP(QCI) */
"0: la %1,0\n"
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (reg0), "+d" (reg1), "+d" (reg2)
:
: "cc", "memory");
return reg1;
}
/**
* ap_nqap(): Send message to adjunct processor queue.
* @qid: The AP queue number
* @psmid: The program supplied message identifier
* @msg: The message text
* @length: The message length
*
* Returns AP queue status structure.
* Condition code 1 on NQAP can't happen because the L bit is 1.
* Condition code 2 on NQAP also means the send is incomplete,
* because a segment boundary was reached. The NQAP is repeated.
*/
static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
unsigned long long psmid,
void *msg, size_t length)
{
struct msgblock { char _[length]; };
register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm ("2") = (unsigned long) msg;
register unsigned long reg3 asm ("3") = (unsigned long) length;
register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
asm volatile (
"0: .long 0xb2ad0042\n" /* NQAP */
" brc 2,0b"
: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
: "d" (reg4), "d" (reg5), "m" (*(struct msgblock *) msg)
: "cc");
return reg1;
}
/**
* ap_dqap(): Receive message from adjunct processor queue.
* @qid: The AP queue number
* @psmid: Pointer to program supplied message identifier
* @msg: The message text
* @length: The message length
*
* Returns AP queue status structure.
* Condition code 1 on DQAP means the receive has taken place
* but only partially. The response is incomplete, hence the
* DQAP is repeated.
* Condition code 2 on DQAP also means the receive is incomplete,
* this time because a segment boundary was reached. Again, the
* DQAP is repeated.
* Note that gpr2 is used by the DQAP instruction to keep track of
* any 'residual' length, in case the instruction gets interrupted.
* Hence it gets zeroed before the instruction.
*/
static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
unsigned long long *psmid,
void *msg, size_t length)
{
struct msgblock { char _[length]; };
register unsigned long reg0 asm("0") = qid | 0x80000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm("2") = 0UL;
register unsigned long reg4 asm("4") = (unsigned long) msg;
register unsigned long reg5 asm("5") = (unsigned long) length;
register unsigned long reg6 asm("6") = 0UL;
register unsigned long reg7 asm("7") = 0UL;
asm volatile(
"0: .long 0xb2ae0064\n" /* DQAP */
" brc 6,0b\n"
: "+d" (reg0), "=d" (reg1), "+d" (reg2),
"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
"=m" (*(struct msgblock *) msg) : : "cc");
*psmid = (((unsigned long long) reg6) << 32) + reg7;
return reg1;
}
#endif /* _AP_ASM_H_ */
This diff is collapsed.
......@@ -27,7 +27,6 @@
#define _AP_BUS_H_
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/types.h>
#define AP_DEVICES 64 /* Number of AP devices. */
......@@ -38,14 +37,17 @@
extern int ap_domain_index;
extern spinlock_t ap_list_lock;
extern struct list_head ap_card_list;
/**
* The ap_qid_t identifier of an ap queue. It contains a
* 6 bit device index and a 4 bit queue index (domain).
* 6 bit card index and a 4 bit queue index (domain).
*/
typedef unsigned int ap_qid_t;
#define AP_MKQID(_device, _queue) (((_device) & 63) << 8 | ((_queue) & 255))
#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255))
#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63)
#define AP_QID_QUEUE(_qid) ((_qid) & 255)
/**
......@@ -55,7 +57,7 @@ typedef unsigned int ap_qid_t;
* @queue_full: Is 1 if the queue is full
* @pad: A 4 bit pad
* @int_enabled: Shows if interrupts are enabled for the AP
* @response_conde: Holds the 8 bit response code
* @response_code: Holds the 8 bit response code
* @pad2: A 16 bit pad
*
* The ap queue status word is returned by all three AP functions
......@@ -105,6 +107,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_DEVICE_TYPE_CEX3C 9
#define AP_DEVICE_TYPE_CEX4 10
#define AP_DEVICE_TYPE_CEX5 11
#define AP_DEVICE_TYPE_CEX6 12
/*
* Known function facilities
......@@ -166,7 +169,8 @@ struct ap_driver {
int (*probe)(struct ap_device *);
void (*remove)(struct ap_device *);
int request_timeout; /* request timeout in jiffies */
void (*suspend)(struct ap_device *);
void (*resume)(struct ap_device *);
};
#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
......@@ -174,38 +178,51 @@ struct ap_driver {
int ap_driver_register(struct ap_driver *, struct module *, char *);
void ap_driver_unregister(struct ap_driver *);
typedef enum ap_wait (ap_func_t)(struct ap_device *ap_dev);
struct ap_device {
struct device device;
struct ap_driver *drv; /* Pointer to AP device driver. */
spinlock_t lock; /* Per device lock. */
struct list_head list; /* private list of all AP devices. */
int device_type; /* AP device type. */
};
enum ap_state state; /* State of the AP device. */
#define to_ap_dev(x) container_of((x), struct ap_device, device)
ap_qid_t qid; /* AP queue id. */
int queue_depth; /* AP queue depth.*/
int device_type; /* AP device type. */
struct ap_card {
struct ap_device ap_dev;
struct list_head list; /* Private list of AP cards. */
struct list_head queues; /* List of assoc. AP queues */
void *private; /* ap driver private pointer. */
int raw_hwtype; /* AP raw hardware type. */
unsigned int functions; /* AP device function bitfield. */
struct timer_list timeout; /* Timer for request timeouts. */
int queue_depth; /* AP queue depth.*/
int id; /* AP card number. */
atomic_t total_request_count; /* # requests ever for this AP device.*/
};
#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
struct ap_queue {
struct ap_device ap_dev;
struct list_head list; /* Private list of AP queues. */
struct ap_card *card; /* Ptr to assoc. AP card. */
spinlock_t lock; /* Per device lock. */
void *private; /* ap driver private pointer. */
ap_qid_t qid; /* AP queue id. */
int interrupt; /* indicate if interrupts are enabled */
int queue_count; /* # messages currently on AP queue. */
struct list_head pendingq; /* List of message sent to AP queue. */
enum ap_state state; /* State of the AP device. */
int pendingq_count; /* # requests on pendingq list. */
struct list_head requestq; /* List of message yet to be sent. */
int requestq_count; /* # requests on requestq list. */
int total_request_count; /* # requests ever for this AP device. */
int total_request_count; /* # requests ever for this AP device.*/
int request_timeout; /* Request timout in jiffies. */
struct timer_list timeout; /* Timer for request timeouts. */
struct list_head pendingq; /* List of message sent to AP queue. */
struct list_head requestq; /* List of message yet to be sent. */
struct ap_message *reply; /* Per device reply message. */
void *private; /* ap driver private pointer. */
};
#define to_ap_dev(x) container_of((x), struct ap_device, device)
#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
struct ap_message {
struct list_head list; /* Request queueing. */
......@@ -217,7 +234,7 @@ struct ap_message {
void *private; /* ap driver private pointer. */
unsigned int special:1; /* Used for special commands. */
/* receive is called from tasklet context */
void (*receive)(struct ap_device *, struct ap_message *,
void (*receive)(struct ap_queue *, struct ap_message *,
struct ap_message *);
};
......@@ -232,10 +249,6 @@ struct ap_config_info {
unsigned char reserved4[16];
} __packed;
#define AP_DEVICE(dt) \
.dev_type=(dt), \
.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
/**
* ap_init_message() - Initialize ap_message.
* Initialize a message before using. Otherwise this might result in
......@@ -250,6 +263,12 @@ static inline void ap_init_message(struct ap_message *ap_msg)
ap_msg->receive = NULL;
}
#define for_each_ap_card(_ac) \
list_for_each_entry(_ac, &ap_card_list, list)
#define for_each_ap_queue(_aq, _ac) \
list_for_each_entry(_aq, &(_ac)->queues, list)
/*
* Note: don't use ap_send/ap_recv after using ap_queue_message
* for the first time. Otherwise the ap message queue will get
......@@ -258,11 +277,26 @@ static inline void ap_init_message(struct ap_message *ap_msg)
int ap_send(ap_qid_t, unsigned long long, void *, size_t);
int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_device *ap_dev);
enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);
void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_queue *aq);
void *ap_airq_ptr(void);
void ap_wait(enum ap_wait wait);
void ap_request_timeout(unsigned long data);
void ap_bus_force_rescan(void);
void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg);
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
unsigned int device_functions);
int ap_module_init(void);
void ap_module_exit(void);
......
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* Adjunct processor bus, card related code.
*/
#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>
#include "ap_bus.h"
#include "ap_asm.h"
/*
* AP card related attributes.
*/
static ssize_t ap_hwtype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
}
static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
static ssize_t ap_raw_hwtype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
}
static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
}
static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
static ssize_t ap_functions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
}
static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
static ssize_t ap_request_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ap_card *ac = to_ap_card(dev);
unsigned int req_cnt;
req_cnt = 0;
spin_lock_bh(&ap_list_lock);
req_cnt = atomic_read(&ac->total_request_count);
spin_unlock_bh(&ap_list_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
}
static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
static ssize_t ap_requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
struct ap_queue *aq;
unsigned int reqq_cnt;
reqq_cnt = 0;
spin_lock_bh(&ap_list_lock);
for_each_ap_queue(aq, ac)
reqq_cnt += aq->requestq_count;
spin_unlock_bh(&ap_list_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
static ssize_t ap_pendingq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
struct ap_queue *aq;
unsigned int penq_cnt;
penq_cnt = 0;
spin_lock_bh(&ap_list_lock);
for_each_ap_queue(aq, ac)
penq_cnt += aq->pendingq_count;
spin_unlock_bh(&ap_list_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
static ssize_t ap_modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
}
static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
static struct attribute *ap_card_dev_attrs[] = {
&dev_attr_hwtype.attr,
&dev_attr_raw_hwtype.attr,
&dev_attr_depth.attr,
&dev_attr_ap_functions.attr,
&dev_attr_request_count.attr,
&dev_attr_requestq_count.attr,
&dev_attr_pendingq_count.attr,
&dev_attr_modalias.attr,
NULL
};
static struct attribute_group ap_card_dev_attr_group = {
.attrs = ap_card_dev_attrs
};
static const struct attribute_group *ap_card_dev_attr_groups[] = {
&ap_card_dev_attr_group,
NULL
};
struct device_type ap_card_type = {
.name = "ap_card",
.groups = ap_card_dev_attr_groups,
};
static void ap_card_device_release(struct device *dev)
{
kfree(to_ap_card(dev));
}
struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
unsigned int functions)
{
struct ap_card *ac;
ac = kzalloc(sizeof(*ac), GFP_KERNEL);
if (!ac)
return NULL;
INIT_LIST_HEAD(&ac->queues);
ac->ap_dev.device.release = ap_card_device_release;
ac->ap_dev.device.type = &ap_card_type;
ac->ap_dev.device_type = device_type;
/* CEX6 toleration: map to CEX5 */
if (device_type == AP_DEVICE_TYPE_CEX6)
ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
ac->raw_hwtype = device_type;
ac->queue_depth = queue_depth;
ac->functions = functions;
ac->id = id;
return ac;
}
/*
* Copyright IBM Corp. 2016
* Author(s): Harald Freudenberger <freude@de.ibm.com>
*/
#ifndef AP_DEBUG_H
#define AP_DEBUG_H
#include <asm/debug.h>
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 5 /* informational */
#define DBF_DEBUG 6 /* for debugging only */
#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
#define DBF_MAX_SPRINTF_ARGS 5
#define AP_DBF(...) \
debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
extern debug_info_t *ap_dbf_info;
int ap_debug_init(void);
void ap_debug_exit(void);
#endif /* AP_DEBUG_H */
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* Adjunct processor bus, queue related code.
*/
#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>
#include "ap_bus.h"
#include "ap_asm.h"
/**
* ap_queue_enable_interruption(): Enable interruption on an AP queue.
* @qid: The AP queue number
* @ind: the notification indicator byte
*
* Enables interruption on AP queue via ap_aqic(). Based on the return
* value it waits a while and tests the AP queue if interrupts
* have been switched on using ap_test_queue().
*/
static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
{
struct ap_queue_status status;
status = ap_aqic(aq->qid, ind);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_OTHERWISE_CHANGED:
return 0;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
case AP_RESPONSE_INVALID_ADDRESS:
pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
AP_QID_CARD(aq->qid),
AP_QID_QUEUE(aq->qid));
return -EOPNOTSUPP;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
default:
return -EBUSY;
}
}
/**
* __ap_send(): Send message to adjunct processor queue.
* @qid: The AP queue number
* @psmid: The program supplied message identifier
* @msg: The message text
* @length: The message length
* @special: Special Bit
*
* Returns AP queue status structure.
* Condition code 1 on NQAP can't happen because the L bit is 1.
* Condition code 2 on NQAP also means the send is incomplete,
* because a segment boundary was reached. The NQAP is repeated.
*/
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
unsigned int special)
{
if (special == 1)
qid |= 0x400000UL;
return ap_nqap(qid, psmid, msg, length);
}
int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
struct ap_queue_status status;
status = __ap_send(qid, psmid, msg, length, 0);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
return 0;
case AP_RESPONSE_Q_FULL:
case AP_RESPONSE_RESET_IN_PROGRESS:
return -EBUSY;
case AP_RESPONSE_REQ_FAC_NOT_INST:
return -EINVAL;
default: /* Device is gone. */
return -ENODEV;
}
}
EXPORT_SYMBOL(ap_send);
int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
struct ap_queue_status status;
if (msg == NULL)
return -EINVAL;
status = ap_dqap(qid, psmid, msg, length);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
return 0;
case AP_RESPONSE_NO_PENDING_REPLY:
if (status.queue_empty)
return -ENOENT;
return -EBUSY;
case AP_RESPONSE_RESET_IN_PROGRESS:
return -EBUSY;
default:
return -ENODEV;
}
}
EXPORT_SYMBOL(ap_recv);
/* State machine definitions and helpers */
static enum ap_wait ap_sm_nop(struct ap_queue *aq)
{
return AP_WAIT_NONE;
}
/**
* ap_sm_recv(): Receive pending reply messages from an AP queue but do
* not change the state of the device.
* @aq: pointer to the AP queue
*
* Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
*/
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
status = ap_dqap(aq->qid, &aq->reply->psmid,
aq->reply->message, aq->reply->length);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
aq->queue_count--;
if (aq->queue_count > 0)
mod_timer(&aq->timeout,
jiffies + aq->request_timeout);
list_for_each_entry(ap_msg, &aq->pendingq, list) {
if (ap_msg->psmid != aq->reply->psmid)
continue;
list_del_init(&ap_msg->list);
aq->pendingq_count--;
ap_msg->receive(aq, ap_msg, aq->reply);
break;
}
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
break;
/* The card shouldn't forget requests but who knows. */
aq->queue_count = 0;
list_splice_init(&aq->pendingq, &aq->requestq);
aq->requestq_count += aq->pendingq_count;
aq->pendingq_count = 0;
break;
default:
break;
}
return status;
}
/**
* ap_sm_read(): Receive pending reply messages from an AP queue.
* @aq: pointer to the AP queue
*
* Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
*/
static enum ap_wait ap_sm_read(struct ap_queue *aq)
{
struct ap_queue_status status;
if (!aq->reply)
return AP_WAIT_NONE;
status = ap_sm_recv(aq);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0) {
aq->state = AP_STATE_WORKING;
return AP_WAIT_AGAIN;
}
aq->state = AP_STATE_IDLE;
return AP_WAIT_NONE;
case AP_RESPONSE_NO_PENDING_REPLY:
if (aq->queue_count > 0)
return AP_WAIT_INTERRUPT;
aq->state = AP_STATE_IDLE;
return AP_WAIT_NONE;
default:
aq->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_suspend_read(): Receive pending reply messages from an AP queue
* without changing the device state in between. In suspend mode we don't
* allow sending new requests, therefore just fetch pending replies.
* @aq: pointer to the AP queue
*
* Returns AP_WAIT_NONE or AP_WAIT_AGAIN
*/
static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
{
struct ap_queue_status status;
if (!aq->reply)
return AP_WAIT_NONE;
status = ap_sm_recv(aq);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0)
return AP_WAIT_AGAIN;
/* fall through */
default:
return AP_WAIT_NONE;
}
}
/**
* ap_sm_write(): Send messages from the request queue to an AP queue.
* @aq: pointer to the AP queue
*
* Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
*/
static enum ap_wait ap_sm_write(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
if (aq->requestq_count <= 0)
return AP_WAIT_NONE;
/* Start the next request on the queue. */
ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
status = __ap_send(aq->qid, ap_msg->psmid,
ap_msg->message, ap_msg->length, ap_msg->special);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
aq->queue_count++;
if (aq->queue_count == 1)
mod_timer(&aq->timeout, jiffies + aq->request_timeout);
list_move_tail(&ap_msg->list, &aq->pendingq);
aq->requestq_count--;
aq->pendingq_count++;
if (aq->queue_count < aq->card->queue_depth) {
aq->state = AP_STATE_WORKING;
return AP_WAIT_AGAIN;
}
/* fall through */
case AP_RESPONSE_Q_FULL:
aq->state = AP_STATE_QUEUE_FULL;
return AP_WAIT_INTERRUPT;
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->state = AP_STATE_RESET_WAIT;
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_MESSAGE_TOO_BIG:
case AP_RESPONSE_REQ_FAC_NOT_INST:
list_del_init(&ap_msg->list);
aq->requestq_count--;
ap_msg->rc = -EINVAL;
ap_msg->receive(aq, ap_msg, NULL);
return AP_WAIT_AGAIN;
default:
aq->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_read_write(): Send and receive messages to/from an AP queue.
* @aq: pointer to the AP queue
*
* Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
*/
static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
{
return min(ap_sm_read(aq), ap_sm_write(aq));
}
/**
* ap_sm_reset(): Reset an AP queue.
* @qid: The AP queue number
*
* Submit the Reset command to an AP queue.
*/
static enum ap_wait ap_sm_reset(struct ap_queue *aq)
{
struct ap_queue_status status;
status = ap_rapq(aq->qid);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->state = AP_STATE_RESET_WAIT;
aq->interrupt = AP_INTR_DISABLED;
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_BUSY:
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
aq->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_reset_wait(): Test queue for completion of the reset operation
* @aq: pointer to the AP queue
*
* Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
*/
static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
void *lsi_ptr;
if (aq->queue_count > 0 && aq->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(aq);
else
/* Get the status with TAPQ */
status = ap_tapq(aq->qid, NULL);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
lsi_ptr = ap_airq_ptr();
if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
aq->state = AP_STATE_SETIRQ_WAIT;
else
aq->state = (aq->queue_count > 0) ?
AP_STATE_WORKING : AP_STATE_IDLE;
return AP_WAIT_AGAIN;
case AP_RESPONSE_BUSY:
case AP_RESPONSE_RESET_IN_PROGRESS:
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
aq->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_setirq_wait(): Test queue for completion of the irq enablement
* @aq: pointer to the AP queue
*
* Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
*/
static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
if (aq->queue_count > 0 && aq->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(aq);
else
/* Get the status with TAPQ */
status = ap_tapq(aq->qid, NULL);
if (status.int_enabled == 1) {
/* Irqs are now enabled */
aq->interrupt = AP_INTR_ENABLED;
aq->state = (aq->queue_count > 0) ?
AP_STATE_WORKING : AP_STATE_IDLE;
}
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0)
return AP_WAIT_AGAIN;
/* fallthrough */
case AP_RESPONSE_NO_PENDING_REPLY:
return AP_WAIT_TIMEOUT;
default:
aq->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/*
* AP state machine jump table
*/
static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_STATE_RESET_START] = {
[AP_EVENT_POLL] = ap_sm_reset,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_RESET_WAIT] = {
[AP_EVENT_POLL] = ap_sm_reset_wait,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_SETIRQ_WAIT] = {
[AP_EVENT_POLL] = ap_sm_setirq_wait,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_IDLE] = {
[AP_EVENT_POLL] = ap_sm_write,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_WORKING] = {
[AP_EVENT_POLL] = ap_sm_read_write,
[AP_EVENT_TIMEOUT] = ap_sm_reset,
},
[AP_STATE_QUEUE_FULL] = {
[AP_EVENT_POLL] = ap_sm_read,
[AP_EVENT_TIMEOUT] = ap_sm_reset,
},
[AP_STATE_SUSPEND_WAIT] = {
[AP_EVENT_POLL] = ap_sm_suspend_read,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_BORKED] = {
[AP_EVENT_POLL] = ap_sm_nop,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
};
enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event)
{
return ap_jumptable[aq->state][event](aq);
}
enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
{
enum ap_wait wait;
while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN)
;
return wait;
}
/*
* Power management for queue devices
*/
void ap_queue_suspend(struct ap_device *ap_dev)
{
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
/* Poll on the device until all requests are finished. */
spin_lock_bh(&aq->lock);
aq->state = AP_STATE_SUSPEND_WAIT;
while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
;
aq->state = AP_STATE_BORKED;
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_suspend);
void ap_queue_resume(struct ap_device *ap_dev)
{
}
EXPORT_SYMBOL(ap_queue_resume);
/*
* AP queue related attributes.
*/
static ssize_t ap_request_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int req_cnt;
spin_lock_bh(&aq->lock);
req_cnt = aq->total_request_count;
spin_unlock_bh(&aq->lock);
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
}
static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
static ssize_t ap_requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int reqq_cnt = 0;
spin_lock_bh(&aq->lock);
reqq_cnt = aq->requestq_count;
spin_unlock_bh(&aq->lock);
return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
static ssize_t ap_pendingq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int penq_cnt = 0;
spin_lock_bh(&aq->lock);
penq_cnt = aq->pendingq_count;
spin_unlock_bh(&aq->lock);
return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
static ssize_t ap_reset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc = 0;
spin_lock_bh(&aq->lock);
switch (aq->state) {
case AP_STATE_RESET_START:
case AP_STATE_RESET_WAIT:
rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
break;
case AP_STATE_WORKING:
case AP_STATE_QUEUE_FULL:
rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
break;
default:
rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
}
spin_unlock_bh(&aq->lock);
return rc;
}
static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
static ssize_t ap_interrupt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc = 0;
spin_lock_bh(&aq->lock);
if (aq->state == AP_STATE_SETIRQ_WAIT)
rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
else if (aq->interrupt == AP_INTR_ENABLED)
rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
else
rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
spin_unlock_bh(&aq->lock);
return rc;
}
static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
static struct attribute *ap_queue_dev_attrs[] = {
&dev_attr_request_count.attr,
&dev_attr_requestq_count.attr,
&dev_attr_pendingq_count.attr,
&dev_attr_reset.attr,
&dev_attr_interrupt.attr,
NULL
};
static struct attribute_group ap_queue_dev_attr_group = {
.attrs = ap_queue_dev_attrs
};
static const struct attribute_group *ap_queue_dev_attr_groups[] = {
&ap_queue_dev_attr_group,
NULL
};
struct device_type ap_queue_type = {
.name = "ap_queue",
.groups = ap_queue_dev_attr_groups,
};
static void ap_queue_device_release(struct device *dev)
{
kfree(to_ap_queue(dev));
}
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
struct ap_queue *aq;
aq = kzalloc(sizeof(*aq), GFP_KERNEL);
if (!aq)
return NULL;
aq->ap_dev.device.release = ap_queue_device_release;
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
/* CEX6 toleration: map to CEX5 */
if (device_type == AP_DEVICE_TYPE_CEX6)
aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
aq->qid = qid;
aq->state = AP_STATE_RESET_START;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq);
return aq;
}
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
aq->reply = reply;
spin_lock_bh(&aq->lock);
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);
/**
* ap_queue_message(): Queue a request to an AP device.
* @aq: The AP device to queue the message to
* @ap_msg: The message that is to be added
*/
void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
/* For asynchronous message handling a valid receive-callback
* is required.
*/
BUG_ON(!ap_msg->receive);
spin_lock_bh(&aq->lock);
/* Queue the message. */
list_add_tail(&ap_msg->list, &aq->requestq);
aq->requestq_count++;
aq->total_request_count++;
atomic_inc(&aq->card->total_request_count);
/* Send/receive as many request from the queue as possible. */
ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_message);
/**
* ap_cancel_message(): Cancel a crypto request.
* @aq: The AP device that has the message queued
* @ap_msg: The message that is to be removed
*
* Cancel a crypto request. This is done by removing the request
* from the device pending or request queue. Note that the
* request stays on the AP queue. When it finishes the message
* reply will be discarded because the psmid can't be found.
*/
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
struct ap_message *tmp;
spin_lock_bh(&aq->lock);
if (!list_empty(&ap_msg->list)) {
list_for_each_entry(tmp, &aq->pendingq, list)
if (tmp->psmid == ap_msg->psmid) {
aq->pendingq_count--;
goto found;
}
aq->requestq_count--;
found:
list_del_init(&ap_msg->list);
}
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
/**
* __ap_flush_queue(): Flush requests.
* @aq: Pointer to the AP queue
*
* Flush all requests from the request/pending queue of an AP device.
*/
static void __ap_flush_queue(struct ap_queue *aq)
{
struct ap_message *ap_msg, *next;
list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
list_del_init(&ap_msg->list);
aq->pendingq_count--;
ap_msg->rc = -EAGAIN;
ap_msg->receive(aq, ap_msg, NULL);
}
list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
list_del_init(&ap_msg->list);
aq->requestq_count--;
ap_msg->rc = -EAGAIN;
ap_msg->receive(aq, ap_msg, NULL);
}
}
void ap_flush_queue(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
__ap_flush_queue(aq);
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
void ap_queue_remove(struct ap_queue *aq)
{
ap_flush_queue(aq);
del_timer_sync(&aq->timeout);
}
EXPORT_SYMBOL(ap_queue_remove);
This diff is collapsed.
......@@ -84,57 +84,110 @@ struct ica_z90_status {
*/
#define ZCRYPT_RNG_BUFFER_SIZE 4096
struct zcrypt_device;
/*
* Identifier for Crypto Request Performance Index
*/
enum crypto_ops {
MEX_1K,
MEX_2K,
MEX_4K,
CRT_1K,
CRT_2K,
CRT_4K,
HWRNG,
SECKEY,
NUM_OPS
};
struct zcrypt_queue;
struct zcrypt_ops {
long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *);
long (*rsa_modexpo_crt)(struct zcrypt_device *,
long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *);
long (*rsa_modexpo_crt)(struct zcrypt_queue *,
struct ica_rsa_modexpo_crt *);
long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *);
long (*rng)(struct zcrypt_device *, char *);
long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *,
struct ap_message *);
long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *,
struct ap_message *);
long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
struct list_head list; /* zcrypt ops list. */
struct module *owner;
int variant;
char name[128];
};
struct zcrypt_device {
struct zcrypt_card {
struct list_head list; /* Device list. */
spinlock_t lock; /* Per device lock. */
struct list_head zqueues; /* List of zcrypt queues */
struct kref refcount; /* device refcounting */
struct ap_device *ap_dev; /* The "real" ap device. */
struct zcrypt_ops *ops; /* Crypto operations. */
struct ap_card *card; /* The "real" ap card device. */
int online; /* User online/offline */
int user_space_type; /* User space device id. */
char *type_string; /* User space device name. */
int min_mod_size; /* Min number of bits. */
int max_mod_size; /* Max number of bits. */
int short_crt; /* Card has crt length restriction. */
int speed_rating; /* Speed of the crypto device. */
int max_exp_bit_length;
int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */
atomic_t load; /* Utilization of the crypto device */
int request_count; /* # current requests. */
};
struct ap_message reply; /* Per-device reply structure. */
int max_exp_bit_length;
struct zcrypt_queue {
struct list_head list; /* Device list. */
struct kref refcount; /* device refcounting */
struct zcrypt_card *zcard;
struct zcrypt_ops *ops; /* Crypto operations. */
struct ap_queue *queue; /* The "real" ap queue device. */
int online; /* User online/offline */
atomic_t load; /* Utilization of the crypto device */
debug_info_t *dbf_area; /* debugging */
int request_count; /* # current requests. */
struct ap_message reply; /* Per-device reply structure. */
};
/* transport layer rescanning */
extern atomic_t zcrypt_rescan_req;
struct zcrypt_device *zcrypt_device_alloc(size_t);
void zcrypt_device_free(struct zcrypt_device *);
void zcrypt_device_get(struct zcrypt_device *);
int zcrypt_device_put(struct zcrypt_device *);
int zcrypt_device_register(struct zcrypt_device *);
void zcrypt_device_unregister(struct zcrypt_device *);
extern spinlock_t zcrypt_list_lock;
extern int zcrypt_device_count;
extern struct list_head zcrypt_card_list;
#define for_each_zcrypt_card(_zc) \
list_for_each_entry(_zc, &zcrypt_card_list, list)
#define for_each_zcrypt_queue(_zq, _zc) \
list_for_each_entry(_zq, &(_zc)->zqueues, list)
struct zcrypt_card *zcrypt_card_alloc(void);
void zcrypt_card_free(struct zcrypt_card *);
void zcrypt_card_get(struct zcrypt_card *);
int zcrypt_card_put(struct zcrypt_card *);
int zcrypt_card_register(struct zcrypt_card *);
void zcrypt_card_unregister(struct zcrypt_card *);
struct zcrypt_card *zcrypt_card_get_best(unsigned int *,
unsigned int, unsigned int);
void zcrypt_card_put_best(struct zcrypt_card *, unsigned int);
struct zcrypt_queue *zcrypt_queue_alloc(size_t);
void zcrypt_queue_free(struct zcrypt_queue *);
void zcrypt_queue_get(struct zcrypt_queue *);
int zcrypt_queue_put(struct zcrypt_queue *);
int zcrypt_queue_register(struct zcrypt_queue *);
void zcrypt_queue_unregister(struct zcrypt_queue *);
void zcrypt_queue_force_online(struct zcrypt_queue *, int);
struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int);
void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int);
int zcrypt_rng_device_add(void);
void zcrypt_rng_device_remove(void);
void zcrypt_msgtype_register(struct zcrypt_ops *);
void zcrypt_msgtype_unregister(struct zcrypt_ops *);
struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int);
void zcrypt_msgtype_release(struct zcrypt_ops *);
struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
......
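
The per-operation speed_rating[] index together with the atomic load counters is the raw material for the new workload balancing. A rough sketch of the idea only (not the driver's actual selection code, which lives in the collapsed zcrypt_api.c diff; locking and the ioctl target checks are omitted):

/* Illustration only: pick the least "expensive" online queue for one
 * operation class, where cost combines the card's speed index for that
 * operation with the current card and queue utilization.  The real
 * traversal would run under zcrypt_list_lock. */
static struct zcrypt_queue *pick_queue_sketch(enum crypto_ops op)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq, *best = NULL;
	int cost, best_cost = INT_MAX;

	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (!zq->online)
				continue;
			cost = zc->speed_rating[op] +
			       atomic_read(&zc->load) + atomic_read(&zq->load);
			if (cost < best_cost) {
				best_cost = cost;
				best = zq;
			}
		}
	}
	return best;
}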
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*
* Copyright IBM Corp. 2012
* Copyright IBM Corp. 2016
* Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
* Harald Freudenberger <freude@de.ibm.com>
*/
#ifndef ZCRYPT_DEBUG_H
#define ZCRYPT_DEBUG_H
#include <asm/debug.h>
#include "zcrypt_api.h"
/* that gives us 15 characters in the text event views */
#define ZCRYPT_DBF_LEN 16
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 6 /* informational */
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 5 /* informational */
#define DBF_DEBUG 6 /* for debugging only */
#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
#define ZCRYPT_DBF_COMMON(level, text...) \
do { \
if (debug_level_enabled(zcrypt_dbf_common, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(zcrypt_dbf_common, level, \
debug_buffer); \
} \
} while (0)
#define ZCRYPT_DBF_DEVICES(level, text...) \
do { \
if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(zcrypt_dbf_devices, level, \
debug_buffer); \
} \
} while (0)
#define ZCRYPT_DBF_DEV(level, device, text...) \
do { \
if (debug_level_enabled(device->dbf_area, level)) { \
char debug_buffer[ZCRYPT_DBF_LEN]; \
snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
debug_text_event(device->dbf_area, level, \
debug_buffer); \
} \
} while (0)
#define DBF_MAX_SPRINTF_ARGS 5
#define ZCRYPT_DBF(...) \
debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
extern debug_info_t *zcrypt_dbf_info;
int zcrypt_debug_init(void);
void zcrypt_debug_exit(void);
......
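
With this rework, zcrypt logs through a single sprintf-style debug area instead of the three text-event areas removed above. A hedged example of an invocation (message text and values are hypothetical; at most DBF_MAX_SPRINTF_ARGS format arguments):

/* Illustration only: log a warning-level event to the zcrypt debug feature. */
static void dbf_example(int card, int queue, int rc)
{
	ZCRYPT_DBF(DBF_WARN, "request on %02x.%04x failed rc=%d\n",
		   card, queue, rc);
}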
This diff is collapsed.
......@@ -35,7 +35,10 @@
#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/
int zcrypt_msgtype50_init(void);
unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *);
unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *);
void zcrypt_msgtype50_init(void);
void zcrypt_msgtype50_exit(void);
#endif /* _ZCRYPT_MSGTYPE50_H_ */
......@@ -116,15 +116,28 @@ struct type86_fmt2_ext {
unsigned int offset4; /* 0x00000000 */
} __packed;
unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
unsigned int *, unsigned short **);
unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
unsigned int *);
unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
#define LOW 10
#define MEDIUM 100
#define HIGH 500
int speed_idx_cca(int);
int speed_idx_ep11(int);
/**
* Prepare a type6 CPRB message for random number generation
*
* @ap_dev: AP device pointer
* @ap_msg: pointer to AP message
*/
static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
struct ap_message *ap_msg,
unsigned random_number_length)
static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
unsigned int random_number_length,
unsigned int *domain)
{
struct {
struct type6_hdr hdr;
......@@ -156,16 +169,16 @@ static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
msg->hdr.FromCardLen2 = random_number_length,
msg->cprbx = local_cprbx;
msg->cprbx.rpl_datal = random_number_length,
msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
memcpy(msg->function_code, msg->hdr.function_code, 0x02);
msg->rule_length = 0x0a;
memcpy(msg->rule, "RANDOM ", 8);
msg->verb_length = 0x02;
msg->key_length = 0x02;
ap_msg->length = sizeof(*msg);
*domain = (unsigned short)msg->cprbx.domain;
}
int zcrypt_msgtype6_init(void);
void zcrypt_msgtype6_init(void);
void zcrypt_msgtype6_exit(void);
#endif /* _ZCRYPT_MSGTYPE6_H_ */
This diff is collapsed.
This diff is collapsed.
......@@ -175,7 +175,8 @@ struct ap_device_id {
kernel_ulong_t driver_info;
};
#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01
#define AP_DEVICE_ID_MATCH_CARD_TYPE 0x01
#define AP_DEVICE_ID_MATCH_QUEUE_TYPE 0x02
/* s390 css bus devices (subchannels) */
struct css_device_id {
......