Commit 30066ce6 authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.9:

  API:
   - The crypto engine code now supports hashes.

  Algorithms:
   - Allow keys >= 2048 bits in FIPS mode for RSA.

  Drivers:
   - Memory overwrite fix for vmx ghash.
   - Add support for building ARM sha1-neon in Thumb2 mode.
   - Reenable ARM ghash-ce code by adding import/export.
   - Reenable img-hash by adding import/export.
   - Add support for multiple cores in omap-aes.
   - Add little-endian support for sha1-powerpc.
   - Add Cavium HWRNG driver for ThunderX SoC"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (137 commits)
  crypto: caam - treat SGT address pointer as u64
  crypto: ccp - Make syslog errors human-readable
  crypto: ccp - clean up data structure
  crypto: vmx - Ensure ghash-generic is enabled
  crypto: testmgr - add guard to dst buffer for ahash_export
  crypto: caam - Unmap region obtained by of_iomap
  crypto: sha1-powerpc - little-endian support
  crypto: gcm - Fix IV buffer size in crypto_gcm_setkey
  crypto: vmx - Fix memory corruption caused by p8_ghash
  crypto: ghash-generic - move common definitions to a new header file
  crypto: caam - fix sg dump
  hwrng: omap - Only fail if pm_runtime_get_sync returns < 0
  crypto: omap-sham - shrink the internal buffer size
  crypto: omap-sham - add support for export/import
  crypto: omap-sham - convert driver logic to use sgs for data xmit
  crypto: omap-sham - change the DMA threshold value to a define
  crypto: omap-sham - add support functions for sg based data handling
  crypto: omap-sham - rename sgl to sgl_tmp for deprecation
  crypto: omap-sham - align algorithms on word offset
  crypto: omap-sham - add context export/import stubs
  ...
......@@ -797,7 +797,8 @@ kernel crypto API | Caller
include/linux/crypto.h and their definition can be seen below.
The former function registers a single transformation, while
the latter works on an array of transformation descriptions.
The latter is useful when registering transformations in bulk.
The latter is useful when registering transformations in bulk,
for example when a driver implements multiple transformations.
</para>
<programlisting>
......@@ -822,18 +823,31 @@ kernel crypto API | Caller
</para>
<para>
The bulk registration / unregistration functions require
that struct crypto_alg is an array of count size. These
functions simply loop over that array and register /
unregister each individual algorithm. If an error occurs,
the loop is terminated at the offending algorithm definition.
That means, the algorithms prior to the offending algorithm
are successfully registered. Note, the caller has no way of
knowing which cipher implementations have successfully
registered. If this is important to know, the caller should
loop through the different implementations using the single
instance *_alg functions for each individual implementation.
The bulk registration/unregistration functions
register/unregister each transformation in the given array of
length count. They handle errors as follows:
</para>
<itemizedlist>
<listitem>
<para>
crypto_register_algs() succeeds if and only if it
successfully registers all the given transformations. If an
error occurs partway through, then it rolls back successful
registrations before returning the error code. Note that if
a driver needs to handle registration errors for individual
transformations, then it will need to use the non-bulk
function crypto_register_alg() instead.
</para>
</listitem>
<listitem>
<para>
crypto_unregister_algs() tries to unregister all the given
transformations, continuing on error. It logs errors and
always returns zero.
</para>
</listitem>
</itemizedlist>
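<para>
As an illustration only (this snippet is not part of the patch), a
driver exposing a hypothetical array my_algs[] of transformations
could use the bulk interface along these lines:
</para>
<programlisting>
static struct crypto_alg my_algs[2] = {
	/* two struct crypto_alg definitions filled in by the driver */
};

static int __init my_driver_init(void)
{
	/* Registers every entry, or rolls back and returns the error. */
	return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
}

static void __exit my_driver_exit(void)
{
	/* crypto_unregister_algs() logs failures and always returns zero. */
	crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
}
</programlisting>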
</sect1>
<sect1><title>Single-Block Symmetric Ciphers [CIPHER]</title>
......
......@@ -138,7 +138,7 @@ static struct shash_alg ghash_alg = {
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_name = "__ghash",
.cra_driver_name = "__driver-ghash-ce",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
......@@ -220,6 +220,27 @@ static int ghash_async_digest(struct ahash_request *req)
}
}
static int ghash_async_import(struct ahash_request *req, const void *in)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
desc->flags = req->base.flags;
return crypto_shash_import(desc, in);
}
static int ghash_async_export(struct ahash_request *req, void *out)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
return crypto_shash_export(desc, out);
}
static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
......@@ -268,7 +289,10 @@ static struct ahash_alg ghash_async_alg = {
.final = ghash_async_final,
.setkey = ghash_async_setkey,
.digest = ghash_async_digest,
.import = ghash_async_import,
.export = ghash_async_export,
.halg.digestsize = GHASH_DIGEST_SIZE,
.halg.statesize = sizeof(struct ghash_desc_ctx),
.halg.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-ce",
......
......@@ -12,7 +12,6 @@
#include <asm/assembler.h>
.syntax unified
.code 32
.fpu neon
.text
......
......@@ -7,6 +7,15 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#ifdef __BIG_ENDIAN__
#define LWZ(rt, d, ra) \
lwz rt,d(ra)
#else
#define LWZ(rt, d, ra) \
li rt,d; \
lwbrx rt,rt,ra
#endif
/*
* We roll the registers for T, A, B, C, D, E around on each
* iteration; T on iteration t is A on iteration t+1, and so on.
......@@ -23,7 +32,7 @@
#define W(t) (((t)%16)+16)
#define LOADW(t) \
lwz W(t),(t)*4(r4)
LWZ(W(t),(t)*4,r4)
#define STEPD0_LOAD(t) \
andc r0,RD(t),RB(t); \
......@@ -33,7 +42,7 @@
add r0,RE(t),r15; \
add RT(t),RT(t),r6; \
add r14,r0,W(t); \
lwz W((t)+4),((t)+4)*4(r4); \
LWZ(W((t)+4),((t)+4)*4,r4); \
rotlwi RB(t),RB(t),30; \
add RT(t),RT(t),r14
......
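A brief note on the LWZ macro above (not part of the patch): SHA-1 treats its message as big-endian 32-bit words. On big-endian PowerPC a plain lwz load already yields the right value, while on little-endian the macro loads the offset into the destination register and uses it as the index for lwbrx (load word byte-reverse indexed), so the bytes are swapped back into big-endian order. For example, message bytes 0x01 0x02 0x03 0x04 would be read as 0x04030201 by lwz on a little-endian core, but as the required 0x01020304 by lwbrx.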
......@@ -39,6 +39,37 @@ struct algif_hash_tfm {
bool has_key;
};
static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
{
unsigned ds;
if (ctx->result)
return 0;
ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
if (!ctx->result)
return -ENOMEM;
memset(ctx->result, 0, ds);
return 0;
}
static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
{
unsigned ds;
if (!ctx->result)
return;
ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
sock_kzfree_s(sk, ctx->result, ds);
ctx->result = NULL;
}
static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
size_t ignored)
{
......@@ -54,6 +85,9 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
if (!ctx->more) {
if ((msg->msg_flags & MSG_MORE))
hash_free_result(sk, ctx);
err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
&ctx->completion);
if (err)
......@@ -90,6 +124,10 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
ctx->more = msg->msg_flags & MSG_MORE;
if (!ctx->more) {
err = hash_alloc_result(sk, ctx);
if (err)
goto unlock;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
......@@ -116,6 +154,13 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
sg_init_table(ctx->sgl.sg, 1);
sg_set_page(ctx->sgl.sg, page, size, offset);
if (!(flags & MSG_MORE)) {
err = hash_alloc_result(sk, ctx);
if (err)
goto unlock;
} else if (!ctx->more)
hash_free_result(sk, ctx);
ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
if (!(flags & MSG_MORE)) {
......@@ -153,6 +198,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
bool result;
int err;
if (len > ds)
......@@ -161,17 +207,29 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
msg->msg_flags |= MSG_TRUNC;
lock_sock(sk);
result = ctx->result;
err = hash_alloc_result(sk, ctx);
if (err)
goto unlock;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
if (ctx->more) {
ctx->more = 0;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
if (err)
goto unlock;
} else if (!result) {
err = af_alg_wait_for_completion(
crypto_ahash_digest(&ctx->req),
&ctx->completion);
}
err = memcpy_to_msg(msg, ctx->result, len);
hash_free_result(sk, ctx);
unlock:
release_sock(sk);
......@@ -394,8 +452,7 @@ static void hash_sock_destruct(struct sock *sk)
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
sock_kzfree_s(sk, ctx->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
hash_free_result(sk, ctx);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
......@@ -407,20 +464,12 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
struct algif_hash_tfm *tfm = private;
struct crypto_ahash *hash = tfm->hash;
unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
unsigned ds = crypto_ahash_digestsize(hash);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
if (!ctx->result) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
memset(ctx->result, 0, ds);
ctx->result = NULL;
ctx->len = len;
ctx->more = 0;
af_alg_init_completion(&ctx->completion);
......
......@@ -107,10 +107,7 @@ static struct shash_alg alg = {
static int __init crct10dif_mod_init(void)
{
int ret;
ret = crypto_register_shash(&alg);
return ret;
return crypto_register_shash(&alg);
}
static void __exit crct10dif_mod_fini(void)
......
......@@ -14,13 +14,12 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
void crypto_finalize_request(struct crypto_engine *engine,
struct ablkcipher_request *req, int err);
/**
* crypto_pump_requests - dequeue one request from engine queue to process
* @engine: the hardware engine
......@@ -34,10 +33,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
bool in_kthread)
{
struct crypto_async_request *async_req, *backlog;
struct ablkcipher_request *req;
struct ahash_request *hreq;
struct ablkcipher_request *breq;
unsigned long flags;
bool was_busy = false;
int ret;
int ret, rtype;
spin_lock_irqsave(&engine->queue_lock, flags);
......@@ -82,9 +82,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (!async_req)
goto out;
req = ablkcipher_request_cast(async_req);
engine->cur_req = req;
engine->cur_req = async_req;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
......@@ -95,6 +93,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
spin_unlock_irqrestore(&engine->queue_lock, flags);
rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
/* Until here we get the request need to be encrypted successfully */
if (!was_busy && engine->prepare_crypt_hardware) {
ret = engine->prepare_crypt_hardware(engine);
......@@ -104,24 +103,55 @@ static void crypto_pump_requests(struct crypto_engine *engine,
}
}
if (engine->prepare_request) {
ret = engine->prepare_request(engine, engine->cur_req);
switch (rtype) {
case CRYPTO_ALG_TYPE_AHASH:
hreq = ahash_request_cast(engine->cur_req);
if (engine->prepare_hash_request) {
ret = engine->prepare_hash_request(engine, hreq);
if (ret) {
pr_err("failed to prepare request: %d\n", ret);
goto req_err;
}
engine->cur_req_prepared = true;
}
ret = engine->hash_one_request(engine, hreq);
if (ret) {
pr_err("failed to prepare request: %d\n", ret);
pr_err("failed to hash one request from queue\n");
goto req_err;
}
engine->cur_req_prepared = true;
}
ret = engine->crypt_one_request(engine, engine->cur_req);
if (ret) {
pr_err("failed to crypt one request from queue\n");
goto req_err;
return;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
breq = ablkcipher_request_cast(engine->cur_req);
if (engine->prepare_cipher_request) {
ret = engine->prepare_cipher_request(engine, breq);
if (ret) {
pr_err("failed to prepare request: %d\n", ret);
goto req_err;
}
engine->cur_req_prepared = true;
}
ret = engine->cipher_one_request(engine, breq);
if (ret) {
pr_err("failed to cipher one request from queue\n");
goto req_err;
}
return;
default:
pr_err("failed to prepare request of unknown type\n");
return;
}
return;
req_err:
crypto_finalize_request(engine, engine->cur_req, ret);
switch (rtype) {
case CRYPTO_ALG_TYPE_AHASH:
hreq = ahash_request_cast(engine->cur_req);
crypto_finalize_hash_request(engine, hreq, ret);
break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
breq = ablkcipher_request_cast(engine->cur_req);
crypto_finalize_cipher_request(engine, breq, ret);
break;
}
return;
out:
......@@ -137,12 +167,14 @@ static void crypto_pump_work(struct kthread_work *work)
}
/**
* crypto_transfer_request - transfer the new request into the engine queue
* crypto_transfer_cipher_request - transfer the new request into the
* enginequeue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
int crypto_transfer_request(struct crypto_engine *engine,
struct ablkcipher_request *req, bool need_pump)
int crypto_transfer_cipher_request(struct crypto_engine *engine,
struct ablkcipher_request *req,
bool need_pump)
{
unsigned long flags;
int ret;
......@@ -162,46 +194,125 @@ int crypto_transfer_request(struct crypto_engine *engine,
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_request);
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
/**
* crypto_transfer_cipher_request_to_engine - transfer one request to list
* into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
struct ablkcipher_request *req)
{
return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
/**
* crypto_transfer_hash_request - transfer the new request into the
* enginequeue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
int crypto_transfer_hash_request(struct crypto_engine *engine,
struct ahash_request *req, bool need_pump)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&engine->queue_lock, flags);
if (!engine->running) {
spin_unlock_irqrestore(&engine->queue_lock, flags);
return -ESHUTDOWN;
}
ret = ahash_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
queue_kthread_work(&engine->kworker, &engine->pump_requests);
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
/**
* crypto_transfer_request_to_engine - transfer one request to list into the
* engine queue
* crypto_transfer_hash_request_to_engine - transfer one request to list
* into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
int crypto_transfer_request_to_engine(struct crypto_engine *engine,
struct ablkcipher_request *req)
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req)
{
return crypto_transfer_request(engine, req, true);
return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
* crypto_finalize_request - finalize one request if the request is done
* crypto_finalize_cipher_request - finalize one request if the request is done
* @engine: the hardware engine
* @req: the request need to be finalized
* @err: error number
*/
void crypto_finalize_request(struct crypto_engine *engine,
struct ablkcipher_request *req, int err)
void crypto_finalize_cipher_request(struct crypto_engine *engine,
struct ablkcipher_request *req, int err)
{
unsigned long flags;
bool finalize_cur_req = false;
int ret;
spin_lock_irqsave(&engine->queue_lock, flags);
if (engine->cur_req == req)
if (engine->cur_req == &req->base)
finalize_cur_req = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
if (finalize_cur_req) {
if (engine->cur_req_prepared && engine->unprepare_request) {
ret = engine->unprepare_request(engine, req);
if (engine->cur_req_prepared &&
engine->unprepare_cipher_request) {
ret = engine->unprepare_cipher_request(engine, req);
if (ret)
pr_err("failed to unprepare request\n");
}
spin_lock_irqsave(&engine->queue_lock, flags);
engine->cur_req = NULL;
engine->cur_req_prepared = false;
spin_unlock_irqrestore(&engine->queue_lock, flags);
}
req->base.complete(&req->base, err);
queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
/**
* crypto_finalize_hash_request - finalize one request if the request is done
* @engine: the hardware engine
* @req: the request need to be finalized
* @err: error number
*/
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err)
{
unsigned long flags;
bool finalize_cur_req = false;
int ret;
spin_lock_irqsave(&engine->queue_lock, flags);
if (engine->cur_req == &req->base)
finalize_cur_req = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
if (finalize_cur_req) {
if (engine->cur_req_prepared &&
engine->unprepare_hash_request) {
ret = engine->unprepare_hash_request(engine, req);
if (ret)
pr_err("failed to unprepare request\n");
}
spin_lock_irqsave(&engine->queue_lock, flags);
engine->cur_req = NULL;
engine->cur_req_prepared = false;
......@@ -212,7 +323,7 @@ void crypto_finalize_request(struct crypto_engine *engine,
queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_request);
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
* crypto_engine_start - start the hardware engine
......@@ -249,7 +360,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_start);
int crypto_engine_stop(struct crypto_engine *engine)
{
unsigned long flags;
unsigned limit = 500;
unsigned int limit = 500;
int ret = 0;
spin_lock_irqsave(&engine->queue_lock, flags);
......
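The following is a minimal illustrative sketch (not part of the patch) of how a hash driver might plug into the extended engine API introduced above. The foo_* names and struct foo_dev are hypothetical; only crypto_transfer_hash_request_to_engine(), crypto_finalize_hash_request() and the engine's hash_one_request hook come from the code above.

#include <crypto/engine.h>
#include <crypto/internal/hash.h>

struct foo_dev {
	struct crypto_engine *engine;
	/* ... hardware state ... */
};

static struct foo_dev *foo_dd;		/* assumed to be set in probe() */

/* The ahash .update/.final/.digest handlers just queue work for the engine. */
static int foo_ahash_enqueue(struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(foo_dd->engine, req);
}

/* Installed as engine->hash_one_request in probe(); called from the engine
 * kthread with one dequeued request, typically to start DMA. */
static int foo_hash_one_request(struct crypto_engine *engine,
				struct ahash_request *req)
{
	/* ... program the hardware for req ... */
	return 0;
}

/* Hardware completion path hands the result (or error) back to the engine,
 * which completes the request and pumps the next one. */
static void foo_hash_done(struct foo_dev *dd, struct ahash_request *req,
			  int err)
{
	crypto_finalize_hash_request(dd->engine, req, err);
}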
......@@ -1178,12 +1178,16 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
goto err;
drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
if (!drbg->Vbuf)
if (!drbg->Vbuf) {
ret = -ENOMEM;
goto fini;
}
drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
if (!drbg->Cbuf)
if (!drbg->Cbuf) {
ret = -ENOMEM;
goto fini;
}
drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
/* scratchpad is only generated for CTR and Hash */
if (drbg->core->flags & DRBG_HMAC)
......@@ -1199,8 +1203,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
if (0 < sb_size) {
drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
if (!drbg->scratchpadbuf)
if (!drbg->scratchpadbuf) {
ret = -ENOMEM;
goto fini;
}
drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
}
......@@ -1917,6 +1923,8 @@ static inline int __init drbg_healthcheck_sanity(void)
return -ENOMEM;
mutex_init(&drbg->drbg_mutex);
drbg->core = &drbg_cores[coreref];
drbg->reseed_threshold = drbg_max_requests(drbg);
/*
* if the following tests fail, it is likely that there is a buffer
......@@ -1926,12 +1934,6 @@ static inline int __init drbg_healthcheck_sanity(void)
* grave bug.
*/
/* get a valid instance of DRBG for following tests */
ret = drbg_instantiate(drbg, NULL, coreref, pr);
if (ret) {
rc = ret;
goto outbuf;
}
max_addtllen = drbg_max_addtl(drbg);
max_request_bytes = drbg_max_request_bytes(drbg);
drbg_string_fill(&addtl, buf, max_addtllen + 1);
......@@ -1941,10 +1943,9 @@ static inline int __init drbg_healthcheck_sanity(void)
/* overflow max_bits */
len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
BUG_ON(0 < len);
drbg_uninstantiate(drbg);
/* overflow max addtllen with personalization string */
ret = drbg_instantiate(drbg, &addtl, coreref, pr);
ret = drbg_seed(drbg, &addtl, false);
BUG_ON(0 == ret);
/* all tests passed */
rc = 0;
......@@ -1952,9 +1953,7 @@ static inline int __init drbg_healthcheck_sanity(void)
pr_devel("DRBG: Sanity tests for failure code paths successfully "
"completed\n");
drbg_uninstantiate(drbg);
outbuf:
kzfree(drbg);
kfree(drbg);
return rc;
}
......@@ -2006,7 +2005,7 @@ static int __init drbg_init(void)
{
unsigned int i = 0; /* pointer to drbg_algs */
unsigned int j = 0; /* pointer to drbg_cores */
int ret = -EFAULT;
int ret;
ret = drbg_healthcheck_sanity();
if (ret)
......@@ -2016,7 +2015,7 @@ static int __init drbg_init(void)
pr_info("DRBG: Cannot register all DRBG types"
"(slots needed: %zu, slots available: %zu)\n",
ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
return ret;
return -EFAULT;
}
/*
......
......@@ -117,7 +117,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
struct crypto_skcipher *ctr = ctx->ctr;
struct {
be128 hash;
u8 iv[8];
u8 iv[16];
struct crypto_gcm_setkey_result result;
......
......@@ -14,24 +14,13 @@
#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
struct ghash_ctx {
struct gf128mul_4k *gf128;
};
struct ghash_desc_ctx {
u8 buffer[GHASH_BLOCK_SIZE];
u32 bytes;
};
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
......
......@@ -612,12 +612,7 @@ EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
int ahash_mcryptd_digest(struct ahash_request *desc)
{
int err;
err = crypto_ahash_init(desc) ?:
ahash_mcryptd_finup(desc);
return err;
return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}
int ahash_mcryptd_update(struct ahash_request *desc)
......
......@@ -35,8 +35,8 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
n_sz--;
}
/* In FIPS mode only allow key size 2K & 3K */
if (n_sz != 256 && n_sz != 384) {
/* In FIPS mode only allow key size 2K and higher */
if (n_sz < 256) {
pr_err("RSA: key size not allowed in FIPS mode\n");
return -EINVAL;
}
......
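For reference: n_sz is the RSA modulus length in bytes (after any leading zero byte has been stripped), so the old test accepted only 256-byte and 384-byte moduli, i.e. 256 * 8 = 2048-bit and 384 * 8 = 3072-bit keys, while the new lower bound of 256 bytes admits any modulus of 2048 bits or more, which matches the "Allow keys >= 2048 bits in FIPS mode for RSA" item in the merge summary above.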
......@@ -209,16 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq,
char *state;
struct ahash_request *req;
int statesize, ret = -EINVAL;
const char guard[] = { 0x00, 0xba, 0xad, 0x00 };
req = *preq;
statesize = crypto_ahash_statesize(
crypto_ahash_reqtfm(req));
state = kmalloc(statesize, GFP_KERNEL);
state = kmalloc(statesize + sizeof(guard), GFP_KERNEL);
if (!state) {
pr_err("alt: hash: Failed to alloc state for %s\n", algo);
goto out_nostate;
}
memcpy(state + statesize, guard, sizeof(guard));
ret = crypto_ahash_export(req, state);
WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
if (ret) {
pr_err("alt: hash: Failed to export() for %s\n", algo);
goto out;
......@@ -665,7 +668,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(key, template[i].key, template[i].klen);
ret = crypto_aead_setkey(tfm, key, template[i].klen);
if (!ret == template[i].fail) {
if (template[i].fail == !ret) {
pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
d, j, algo, crypto_aead_get_flags(tfm));
goto out;
......@@ -770,7 +773,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(key, template[i].key, template[i].klen);
ret = crypto_aead_setkey(tfm, key, template[i].klen);
if (!ret == template[i].fail) {
if (template[i].fail == !ret) {
pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
d, j, algo, crypto_aead_get_flags(tfm));
goto out;
......@@ -1008,6 +1011,9 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
if (template[i].np)
continue;
if (fips_enabled && template[i].fips_skip)
continue;
j++;
ret = -EINVAL;
......@@ -1023,7 +1029,7 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
ret = crypto_cipher_setkey(tfm, template[i].key,
template[i].klen);
if (!ret == template[i].fail) {
if (template[i].fail == !ret) {
printk(KERN_ERR "alg: cipher: setkey failed "
"on test %d for %s: flags=%x\n", j,
algo, crypto_cipher_get_flags(tfm));
......@@ -1112,6 +1118,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
if (template[i].np && !template[i].also_non_np)
continue;
if (fips_enabled && template[i].fips_skip)
continue;
if (template[i].iv)
memcpy(iv, template[i].iv, ivsize);
else
......@@ -1133,7 +1142,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
ret = crypto_skcipher_setkey(tfm, template[i].key,
template[i].klen);
if (!ret == template[i].fail) {
if (template[i].fail == !ret) {
pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
d, j, algo, crypto_skcipher_get_flags(tfm));
goto out;
......@@ -1198,6 +1207,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
if (!template[i].np)
continue;
if (fips_enabled && template[i].fips_skip)
continue;
if (template[i].iv)
memcpy(iv, template[i].iv, ivsize);
else
......@@ -1211,7 +1223,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
ret = crypto_skcipher_setkey(tfm, template[i].key,
template[i].klen);
if (!ret == template[i].fail) {
if (template[i].fail == !ret) {
pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
d, j, algo, crypto_skcipher_get_flags(tfm));
goto out;
......
......@@ -59,6 +59,7 @@ struct hash_testvec {
* @tap: How to distribute data in @np SGs
* @also_non_np: if set to 1, the test will be also done without
* splitting data in @np SGs
* @fips_skip: Skip the test vector in FIPS mode
*/
struct cipher_testvec {
......@@ -75,6 +76,7 @@ struct cipher_testvec {
unsigned char klen;
unsigned short ilen;
unsigned short rlen;
bool fips_skip;
};
struct aead_testvec {
......@@ -18224,6 +18226,7 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.klen = 32,
.fips_skip = 1,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.input = "\x00\x00\x00\x00\x00\x00\x00\x00"
......@@ -18566,6 +18569,7 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.klen = 32,
.fips_skip = 1,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.input = "\x91\x7c\xf6\x9e\xbd\x68\xb2\xec"
......@@ -24,6 +24,10 @@
#include <linux/preempt.h>
#include <asm/xor.h>
#ifndef XOR_SELECT_TEMPLATE
#define XOR_SELECT_TEMPLATE(x) (x)
#endif
/* The xor routines to use. */
static struct xor_block_template *active_template;
......@@ -109,6 +113,15 @@ calibrate_xor_blocks(void)
void *b1, *b2;
struct xor_block_template *f, *fastest;
fastest = XOR_SELECT_TEMPLATE(NULL);
if (fastest) {
printk(KERN_INFO "xor: automatically using best "
"checksumming function %-10s\n",
fastest->name);
goto out;
}
/*
* Note: Since the memory is not actually used for _anything_ but to
* test the XOR speed, we don't really want kmemcheck to warn about
......@@ -126,36 +139,22 @@ calibrate_xor_blocks(void)
* all the possible functions, just test the best one
*/
fastest = NULL;
#ifdef XOR_SELECT_TEMPLATE
fastest = XOR_SELECT_TEMPLATE(fastest);
#endif
#define xor_speed(templ) do_xor_speed((templ), b1, b2)
if (fastest) {
printk(KERN_INFO "xor: automatically using best "
"checksumming function:\n");
xor_speed(fastest);
goto out;
} else {
printk(KERN_INFO "xor: measuring software checksum speed\n");
XOR_TRY_TEMPLATES;
fastest = template_list;
for (f = fastest; f; f = f->next)
if (f->speed > fastest->speed)
fastest = f;
}
printk(KERN_INFO "xor: measuring software checksum speed\n");
XOR_TRY_TEMPLATES;
fastest = template_list;
for (f = fastest; f; f = f->next)
if (f->speed > fastest->speed)
fastest = f;
printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n",
fastest->name, fastest->speed / 1000, fastest->speed % 1000);
#undef xor_speed
out:
free_pages((unsigned long)b1, 2);
out:
active_template = fastest;
return 0;
}
......
......@@ -5,7 +5,7 @@
*
* Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
*
* Based om ecb.c
* Based on ecb.c
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
......
......@@ -410,6 +410,19 @@ config HW_RANDOM_MESON
If unsure, say Y.
config HW_RANDOM_CAVIUM
tristate "Cavium ThunderX Random Number Generator support"
depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Cavium SoCs.
To compile this driver as a module, choose M here: the
module will be called cavium_rng.
If unsure, say Y.
endif # HW_RANDOM
config UML_RANDOM
......
......@@ -35,3 +35,4 @@ obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
......@@ -24,16 +24,18 @@
* warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <asm/io.h>
#define DRV_NAME "AMD768-HWRNG"
#define PFX KBUILD_MODNAME ": "
#define RNGDATA 0x00
#define RNGDONE 0x04
#define PMBASE_OFFSET 0xF0
#define PMBASE_SIZE 8
/*
* Data for PCI driver interface
......@@ -50,72 +52,84 @@ static const struct pci_device_id pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct pci_dev *amd_pdev;
struct amd768_priv {
void __iomem *iobase;
struct pci_dev *pcidev;
};
static int amd_rng_data_present(struct hwrng *rng, int wait)
static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
u32 pmbase = (u32)rng->priv;
int data, i;
for (i = 0; i < 20; i++) {
data = !!(inl(pmbase + 0xF4) & 1);
if (data || !wait)
break;
udelay(10);
u32 *data = buf;
struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
size_t read = 0;
/* We will wait at maximum one time per read */
int timeout = max / 4 + 1;
/*
* RNG data is available when RNGDONE is set to 1
* New random numbers are generated approximately 128 microseconds
* after RNGDATA is read
*/
while (read < max) {
if (ioread32(priv->iobase + RNGDONE) == 0) {
if (wait) {
/* Delay given by datasheet */
usleep_range(128, 196);
if (timeout-- == 0)
return read;
} else {
return 0;
}
} else {
*data = ioread32(priv->iobase + RNGDATA);
data++;
read += 4;
}
}
return data;
}
static int amd_rng_data_read(struct hwrng *rng, u32 *data)
{
u32 pmbase = (u32)rng->priv;
*data = inl(pmbase + 0xF0);
return 4;
return read;
}
static int amd_rng_init(struct hwrng *rng)
{
struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
u8 rnen;
pci_read_config_byte(amd_pdev, 0x40, &rnen);
rnen |= (1 << 7); /* RNG on */
pci_write_config_byte(amd_pdev, 0x40, rnen);
pci_read_config_byte(priv->pcidev, 0x40, &rnen);
rnen |= BIT(7); /* RNG on */
pci_write_config_byte(priv->pcidev, 0x40, rnen);
pci_read_config_byte(amd_pdev, 0x41, &rnen);
rnen |= (1 << 7); /* PMIO enable */
pci_write_config_byte(amd_pdev, 0x41, rnen);
pci_read_config_byte(priv->pcidev, 0x41, &rnen);
rnen |= BIT(7); /* PMIO enable */
pci_write_config_byte(priv->pcidev, 0x41, rnen);
return 0;
}
static void amd_rng_cleanup(struct hwrng *rng)
{
struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
u8 rnen;
pci_read_config_byte(amd_pdev, 0x40, &rnen);
rnen &= ~(1 << 7); /* RNG off */
pci_write_config_byte(amd_pdev, 0x40, rnen);
pci_read_config_byte(priv->pcidev, 0x40, &rnen);
rnen &= ~BIT(7); /* RNG off */
pci_write_config_byte(priv->pcidev, 0x40, rnen);
}
static struct hwrng amd_rng = {
.name = "amd",
.init = amd_rng_init,
.cleanup = amd_rng_cleanup,
.data_present = amd_rng_data_present,
.data_read = amd_rng_data_read,
.read = amd_rng_read,
};
static int __init mod_init(void)
{
int err = -ENODEV;
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
u32 pmbase;
struct amd768_priv *priv;
for_each_pci_dev(pdev) {
ent = pci_match_id(pci_tbl, pdev);
......@@ -123,42 +137,44 @@ static int __init mod_init(void)
goto found;
}
/* Device not found. */
goto out;
return -ENODEV;
found:
err = pci_read_config_dword(pdev, 0x58, &pmbase);
if (err)
goto out;
err = -EIO;
return err;
pmbase &= 0x0000FF00;
if (pmbase == 0)
goto out;
if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) {
dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n",
return -EIO;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
PMBASE_SIZE, DRV_NAME)) {
dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
pmbase + 0xF0);
err = -EBUSY;
goto out;
return -EBUSY;
}
amd_rng.priv = (unsigned long)pmbase;
amd_pdev = pdev;
pr_info("AMD768 RNG detected\n");
err = hwrng_register(&amd_rng);
if (err) {
pr_err(PFX "RNG registering failed (%d)\n",
err);
release_region(pmbase + 0xF0, 8);
goto out;
priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
PMBASE_SIZE);
if (!priv->iobase) {
pr_err(DRV_NAME "Cannot map ioport\n");
return -ENOMEM;
}
out:
return err;
amd_rng.priv = (unsigned long)priv;
priv->pcidev = pdev;
pr_info(DRV_NAME " detected\n");
return devm_hwrng_register(&pdev->dev, &amd_rng);
}
static void __exit mod_exit(void)
{
u32 pmbase = (unsigned long)amd_rng.priv;
release_region(pmbase + 0xF0, 8);
hwrng_unregister(&amd_rng);
}
module_init(mod_init);
......
......@@ -92,9 +92,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
bcm2835_rng_ops.priv = (unsigned long)rng_base;
rng_id = of_match_node(bcm2835_rng_of_match, np);
if (!rng_id)
if (!rng_id) {
iounmap(rng_base);
return -EINVAL;
}
/* Check for rng init function, execute it */
rng_setup = rng_id->data;
if (rng_setup)
......
/*
* Hardware Random Number Generator support for Cavium, Inc.
* Thunder processor family.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2016 Cavium, Inc.
*/
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
struct cavium_rng {
struct hwrng ops;
void __iomem *result;
};
/* Read data from the RNG unit */
static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
{
struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
unsigned int size = max;
while (size >= 8) {
*((u64 *)dat) = readq(p->result);
size -= 8;
dat += 8;
}
while (size > 0) {
*((u8 *)dat) = readb(p->result);
size--;
dat++;
}
return max;
}
/* Map Cavium RNG to an HWRNG object */
static int cavium_rng_probe_vf(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct cavium_rng *rng;
int ret;
rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
if (!rng)
return -ENOMEM;
/* Map the RNG result */
rng->result = pcim_iomap(pdev, 0, 0);
if (!rng->result) {
dev_err(&pdev->dev, "Error iomap failed retrieving result.\n");
return -ENOMEM;
}
rng->ops.name = "cavium rng";
rng->ops.read = cavium_rng_read;
rng->ops.quality = 1000;
pci_set_drvdata(pdev, rng);
ret = hwrng_register(&rng->ops);
if (ret) {
dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
return ret;
}
return 0;
}
/* Remove the VF */
void cavium_rng_remove_vf(struct pci_dev *pdev)
{
struct cavium_rng *rng;
rng = pci_get_drvdata(pdev);
hwrng_unregister(&rng->ops);
}
static const struct pci_device_id cavium_rng_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0},
{0,},
};
MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table);
static struct pci_driver cavium_rng_vf_driver = {
.name = "cavium_rng_vf",
.id_table = cavium_rng_vf_id_table,
.probe = cavium_rng_probe_vf,
.remove = cavium_rng_remove_vf,
};
module_pci_driver(cavium_rng_vf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
MODULE_LICENSE("GPL");
/*
* Hardware Random Number Generator support for Cavium Inc.
* Thunder processor family.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2016 Cavium, Inc.
*/
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#define THUNDERX_RNM_ENT_EN 0x1
#define THUNDERX_RNM_RNG_EN 0x2
struct cavium_rng_pf {
void __iomem *control_status;
};
/* Enable the RNG hardware and activate the VF */
static int cavium_rng_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct cavium_rng_pf *rng;
int iov_err;
rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
if (!rng)
return -ENOMEM;
/*Map the RNG control */
rng->control_status = pcim_iomap(pdev, 0, 0);
if (!rng->control_status) {
dev_err(&pdev->dev,
"Error iomap failed retrieving control_status.\n");
return -ENOMEM;
}
/* Enable the RNG hardware and entropy source */
writeq(THUNDERX_RNM_RNG_EN | THUNDERX_RNM_ENT_EN,
rng->control_status);
pci_set_drvdata(pdev, rng);
/* Enable the Cavium RNG as a VF */
iov_err = pci_enable_sriov(pdev, 1);
if (iov_err != 0) {
/* Disable the RNG hardware and entropy source */
writeq(0, rng->control_status);
dev_err(&pdev->dev,
"Error initializing RNG virtual function,(%i).\n",
iov_err);
return iov_err;
}
return 0;
}
/* Disable VF and RNG Hardware */
void cavium_rng_remove(struct pci_dev *pdev)
{
struct cavium_rng_pf *rng;
rng = pci_get_drvdata(pdev);
/* Remove the VF */
pci_disable_sriov(pdev);
/* Disable the RNG hardware and entropy source */
writeq(0, rng->control_status);
}
static const struct pci_device_id cavium_rng_pf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa018), 0, 0, 0}, /* Thunder RNM */
{0,},
};
MODULE_DEVICE_TABLE(pci, cavium_rng_pf_id_table);
static struct pci_driver cavium_rng_pf_driver = {
.name = "cavium_rng_pf",
.id_table = cavium_rng_pf_id_table,
.probe = cavium_rng_probe,
.remove = cavium_rng_remove,
};
module_pci_driver(cavium_rng_pf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
MODULE_LICENSE("GPL");
......@@ -449,22 +449,6 @@ int hwrng_register(struct hwrng *rng)
goto out;
mutex_lock(&rng_mutex);
/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
err = -ENOMEM;
if (!rng_buffer) {
rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
if (!rng_buffer)
goto out_unlock;
}
if (!rng_fillbuf) {
rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
if (!rng_fillbuf) {
kfree(rng_buffer);
goto out_unlock;
}
}
/* Must not register two RNGs with the same name. */
err = -EEXIST;
list_for_each_entry(tmp, &rng_list, list) {
......@@ -573,7 +557,26 @@ EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
static int __init hwrng_modinit(void)
{
return register_miscdev();
int ret = -ENOMEM;
/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
if (!rng_buffer)
return -ENOMEM;
rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
if (!rng_fillbuf) {
kfree(rng_buffer);
return -ENOMEM;
}
ret = register_miscdev();
if (ret) {
kfree(rng_fillbuf);
kfree(rng_buffer);
}
return ret;
}
static void __exit hwrng_modexit(void)
......
......@@ -24,15 +24,12 @@
* warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <asm/io.h>
#define PFX KBUILD_MODNAME ": "
#define GEODE_RNG_DATA_REG 0x50
#define GEODE_RNG_STATUS_REG 0x54
......@@ -85,7 +82,6 @@ static struct hwrng geode_rng = {
static int __init mod_init(void)
{
int err = -ENODEV;
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
void __iomem *mem;
......@@ -93,43 +89,27 @@ static int __init mod_init(void)
for_each_pci_dev(pdev) {
ent = pci_match_id(pci_tbl, pdev);
if (ent)
goto found;
}
/* Device not found. */
goto out;
found:
rng_base = pci_resource_start(pdev, 0);
if (rng_base == 0)
goto out;
err = -ENOMEM;
mem = ioremap(rng_base, 0x58);
if (!mem)
goto out;
geode_rng.priv = (unsigned long)mem;
pr_info("AMD Geode RNG detected\n");
err = hwrng_register(&geode_rng);
if (err) {
pr_err(PFX "RNG registering failed (%d)\n",
err);
goto err_unmap;
if (ent) {
rng_base = pci_resource_start(pdev, 0);
if (rng_base == 0)
return -ENODEV;
mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
if (!mem)
return -ENOMEM;
geode_rng.priv = (unsigned long)mem;
pr_info("AMD Geode RNG detected\n");
return devm_hwrng_register(&pdev->dev, &geode_rng);
}
}
out:
return err;
err_unmap:
iounmap(mem);
goto out;
/* Device not found. */
return -ENODEV;
}
static void __exit mod_exit(void)
{
void __iomem *mem = (void __iomem *)geode_rng.priv;
hwrng_unregister(&geode_rng);
iounmap(mem);
}
module_init(mod_init);
......
......@@ -76,9 +76,6 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
struct meson_rng_data *data =
container_of(rng, struct meson_rng_data, rng);
if (max < sizeof(u32))
return 0;
*(u32 *)buf = readl_relaxed(data->base + RNG_DATA);
return sizeof(u32);
......
......@@ -385,7 +385,7 @@ static int omap_rng_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret) {
if (ret < 0) {
dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
pm_runtime_put_noidle(&pdev->dev);
goto err_ioremap;
......@@ -443,7 +443,7 @@ static int __maybe_unused omap_rng_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
if (ret) {
if (ret < 0) {
dev_err(dev, "Failed to runtime_get device: %d\n", ret);
pm_runtime_put_noidle(dev);
return ret;
......
......@@ -71,12 +71,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)
return 0;
}
static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
{
return 1;
}
static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
{
int r;
......@@ -88,8 +83,7 @@ static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
static struct hwrng omap3_rom_rng_ops = {
.name = "omap3-rom",
.data_present = omap3_rom_rng_data_present,
.data_read = omap3_rom_rng_data_read,
.read = omap3_rom_rng_read,
};
static int omap3_rom_rng_probe(struct platform_device *pdev)
......
......@@ -95,42 +95,20 @@ static struct hwrng pasemi_rng = {
.data_read = pasemi_rng_data_read,
};
static int rng_probe(struct platform_device *ofdev)
static int rng_probe(struct platform_device *pdev)
{
void __iomem *rng_regs;
struct device_node *rng_np = ofdev->dev.of_node;
struct resource res;
int err = 0;
struct resource *res;
err = of_address_to_resource(rng_np, 0, &res);
if (err)
return -ENODEV;
rng_regs = ioremap(res.start, 0x100);
if (!rng_regs)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rng_regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rng_regs))
return PTR_ERR(rng_regs);
pasemi_rng.priv = (unsigned long)rng_regs;
pr_info("Registering PA Semi RNG\n");
err = hwrng_register(&pasemi_rng);
if (err)
iounmap(rng_regs);
return err;
}
static int rng_remove(struct platform_device *dev)
{
void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
hwrng_unregister(&pasemi_rng);
iounmap(rng_regs);
return 0;
return devm_hwrng_register(&pdev->dev, &pasemi_rng);
}
static const struct of_device_id rng_match[] = {
......@@ -146,7 +124,6 @@ static struct platform_driver rng_driver = {
.of_match_table = rng_match,
},
.probe = rng_probe,
.remove = rng_remove,
};
module_platform_driver(rng_driver);
......
......@@ -143,7 +143,6 @@ static struct platform_driver pic32_rng_driver = {
.remove = pic32_rng_remove,
.driver = {
.name = "pic32-rng",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(pic32_rng_of_match),
},
};
......
......@@ -54,9 +54,6 @@ static int st_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
u32 status;
int i;
if (max < sizeof(u16))
return -EINVAL;
/* Wait until FIFO is full - max 4uS*/
for (i = 0; i < ST_RNG_FILL_FIFO_TIMEOUT; i++) {
status = readl_relaxed(ddata->base + ST_RNG_STATUS_REG);
......@@ -111,6 +108,7 @@ static int st_rng_probe(struct platform_device *pdev)
ret = hwrng_register(&ddata->ops);
if (ret) {
dev_err(&pdev->dev, "Failed to register HW RNG\n");
clk_disable_unprepare(clk);
return ret;
}
......
......@@ -144,22 +144,13 @@ static int __init tx4939_rng_probe(struct platform_device *dev)
}
platform_set_drvdata(dev, rngdev);
return hwrng_register(&rngdev->rng);
}
static int __exit tx4939_rng_remove(struct platform_device *dev)
{
struct tx4939_rng *rngdev = platform_get_drvdata(dev);
hwrng_unregister(&rngdev->rng);
return 0;
return devm_hwrng_register(&dev->dev, &rngdev->rng);
}
static struct platform_driver tx4939_rng_driver = {
.driver = {
.name = "tx4939-rng",
},
.remove = tx4939_rng_remove,
};
module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe);
......
......@@ -318,6 +318,9 @@ config CRYPTO_DEV_OMAP_AES
select CRYPTO_AES
select CRYPTO_BLKCIPHER
select CRYPTO_ENGINE
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_CTR
help
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
......
......@@ -111,6 +111,42 @@
#else
#define debug(format, arg...)
#endif
#ifdef DEBUG
#include <linux/highmem.h>
static void dbg_dump_sg(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
struct scatterlist *sg, size_t tlen, bool ascii,
bool may_sleep)
{
struct scatterlist *it;
void *it_page;
size_t len;
void *buf;
for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
/*
* make sure the scatterlist's page
* has a valid virtual memory mapping
*/
it_page = kmap_atomic(sg_page(it));
if (unlikely(!it_page)) {
printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
return;
}
buf = it_page + it->offset;
len = min(tlen, it->length);
print_hex_dump(level, prefix_str, prefix_type, rowsize,
groupsize, buf, len, ascii);
tlen -= len;
kunmap_atomic(it_page);
}
}
#endif
static struct list_head alg_list;
struct caam_alg_entry {
......@@ -227,8 +263,9 @@ static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
if (is_rfc3686) {
nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
enckeylen);
append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc,
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
......@@ -500,11 +537,10 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
/* Class 1 operation */
append_operation(desc, ctx->class1_alg_type |
......@@ -578,11 +614,10 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
/* Choose operation */
if (ctr_mode)
......@@ -683,11 +718,10 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
/* Class 1 operation */
append_operation(desc, ctx->class1_alg_type |
......@@ -1478,7 +1512,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
int ret = 0;
u32 *key_jump_cmd;
u32 *desc;
u32 *nonce;
u8 *nonce;
u32 geniv;
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
......@@ -1531,9 +1565,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
nonce = (u32 *)(key + keylen);
append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
nonce = (u8 *)key + keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc, MOVE_WAITCOMP |
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
......@@ -1549,11 +1584,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
/* Load operation */
append_operation(desc, ctx->class1_alg_type |
......@@ -1590,9 +1624,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
nonce = (u32 *)(key + keylen);
append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
nonce = (u8 *)key + keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc, MOVE_WAITCOMP |
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
......@@ -1608,11 +1643,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
/* Choose operation */
if (ctr_mode)
......@@ -1653,9 +1687,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load Nonce into CONTEXT1 reg */
if (is_rfc3686) {
nonce = (u32 *)(key + keylen);
append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
nonce = (u8 *)key + keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc, MOVE_WAITCOMP |
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
......@@ -1685,11 +1720,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, (u32)1, LDST_IMM |
LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
LDST_OFFSET_SHIFT));
if (ctx1_iv_off)
append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
......@@ -1995,9 +2029,9 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif
ablkcipher_unmap(jrdev, edesc, req);
......@@ -2027,9 +2061,9 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif
ablkcipher_unmap(jrdev, edesc, req);
......@@ -2184,12 +2218,15 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->src_nents ? 100 : req->nbytes, 1);
printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif
len = desc_len(sh_desc);
......@@ -2241,12 +2278,14 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
edesc->src_nents ? 100 : req->nbytes, 1);
dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif
len = desc_len(sh_desc);
......@@ -2516,18 +2555,20 @@ static int aead_decrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
#ifdef DEBUG
bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
req->assoclen + req->cryptlen, 1, may_sleep);
#endif
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
&all_contig, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
req->assoclen + req->cryptlen, 1);
#endif
/* Create and submit job descriptor*/
init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
......
This diff has been collapsed.
......@@ -14,6 +14,7 @@
#include "jr.h"
#include "desc_constr.h"
#include "error.h"
#include "ctrl.h"
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
......@@ -826,6 +827,8 @@ static int caam_probe(struct platform_device *pdev)
caam_remove:
caam_remove(pdev);
return ret;
iounmap_ctrl:
iounmap(ctrl);
disable_caam_emi_slow:
......
......@@ -23,13 +23,7 @@
#define SEC4_SG_OFFSET_MASK 0x00001fff
struct sec4_sg_entry {
#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && \
defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
u32 rsvd1;
dma_addr_t ptr;
#else
u64 ptr;
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
u32 len;
u32 bpid_offset;
};
......
......@@ -324,6 +324,23 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
}
APPEND_CMD_RAW_IMM(load, LOAD, u32);
/*
* ee - endianness
* size - size of immediate type in bytes
*/
#define APPEND_CMD_RAW_IMM2(cmd, op, ee, size) \
static inline void append_##cmd##_imm_##ee##size(u32 *desc, \
u##size immediate, \
u32 options) \
{ \
__##ee##size data = cpu_to_##ee##size(immediate); \
PRINT_POS; \
append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(data)); \
append_data(desc, &data, sizeof(data)); \
}
APPEND_CMD_RAW_IMM2(load, LOAD, be, 32);
/*
* Append math command. Only the last part of destination and source need to
* be specified
......
......@@ -41,7 +41,6 @@ struct caam_drv_private_jr {
struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
int irq; /* One per queue */
/* Number of scatterlist crypt transforms active on the JobR */
......
......@@ -73,8 +73,6 @@ static int caam_jr_shutdown(struct device *dev)
ret = caam_reset_hw_jr(dev);
tasklet_kill(&jrp->irqtask);
/* Release interrupt */
free_irq(jrp->irq, dev);
......@@ -130,7 +128,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/*
* Check the output ring for ready responses, kick
* tasklet if jobs done.
* the threaded irq if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
......@@ -152,18 +150,13 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
preempt_disable();
tasklet_schedule(&jrp->irqtask);
preempt_enable();
return IRQ_HANDLED;
return IRQ_WAKE_THREAD;
}
/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
{
int hw_idx, sw_idx, i, head, tail;
struct device *dev = (struct device *)devarg;
struct device *dev = st_dev;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
......@@ -237,6 +230,8 @@ static void caam_jr_dequeue(unsigned long devarg)
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
return IRQ_HANDLED;
}
/**
......@@ -394,11 +389,10 @@ static int caam_jr_init(struct device *dev)
jrp = dev_get_drvdata(dev);
tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
/* Connect job ring interrupt handler. */
error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
dev_name(dev), dev);
error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
caam_jr_threadirq, IRQF_SHARED,
dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
......@@ -460,7 +454,6 @@ static int caam_jr_init(struct device *dev)
out_free_irq:
free_irq(jrp->irq, dev);
out_kill_deq:
tasklet_kill(&jrp->irqtask);
return error;
}
......@@ -513,6 +506,7 @@ static int caam_jr_probe(struct platform_device *pdev)
error = caam_jr_init(jrdev); /* now turn on hardware */
if (error) {
irq_dispose_mapping(jrpriv->irq);
iounmap(ctrl);
return error;
}
......
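The jr.c hunks above replace the dequeue tasklet with a threaded interrupt handler. As a reminder of the generic pattern being adopted (this sketch is illustrative, not part of the patch, and all my_* names are made up), the hard handler only acknowledges the hardware and returns IRQ_WAKE_THREAD, while the thread function does the heavy dequeue work in process context:

#include <linux/interrupt.h>

static irqreturn_t my_hardirq(int irq, void *dev_id)
{
	/* With IRQF_SHARED, first check that our device really raised the
	 * interrupt and return IRQ_NONE if it did not (omitted here).
	 * Then ACK/mask the hardware and defer the real work.
	 */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_threadfn(int irq, void *dev_id)
{
	/* Runs in a kernel thread, so it may sleep while processing
	 * completed jobs, unlike the old tasklet.
	 */
	return IRQ_HANDLED;
}

static int my_setup_irq(unsigned int irq, void *dev_id)
{
	return request_threaded_irq(irq, my_hardirq, my_threadfn,
				    IRQF_SHARED, "my-jobring", dev_id);
}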
......@@ -196,6 +196,14 @@ static inline u64 rd_reg64(void __iomem *reg)
#define caam_dma_to_cpu(value) caam32_to_cpu(value)
#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
#define cpu_to_caam_dma64(value) \
(((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
(u64)cpu_to_caam32(upper_32_bits(value)))
#else
#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
#endif
/*
* jr_outentry
* Represents each entry in a JobR output ring
......
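To make the effect of the new i.MX variant concrete, the standalone sketch below mirrors cpu_to_caam_dma64(): the two 32-bit halves of the bus address are swapped within the u64, and each half is passed through the CAAM endianness conversion. This is illustrative only and not part of the patch; htonl() stands in for cpu_to_caam32() under the assumption of a little-endian CPU and a big-endian CAAM block, which is not true of every SoC.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static uint64_t demo_cpu_to_caam_dma64(uint64_t value)
{
	uint32_t lo = (uint32_t)value;		/* lower_32_bits(value) */
	uint32_t hi = (uint32_t)(value >> 32);	/* upper_32_bits(value) */

	/* low half lands in the upper 32 bits, high half in the lower 32 */
	return ((uint64_t)htonl(lo) << 32) | (uint64_t)htonl(hi);
}

int main(void)
{
	uint64_t dma = 0x0000000112345678ULL;

	printf("in : 0x%016llx\n", (unsigned long long)dma);
	printf("out: 0x%016llx\n",
	       (unsigned long long)demo_cpu_to_caam_dma64(dma));
	/* on a little-endian host this prints out: 0x7856341201000000 */
	return 0;
}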
......@@ -15,7 +15,7 @@ struct sec4_sg_entry;
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
dma_addr_t dma, u32 len, u16 offset)
{
sec4_sg_ptr->ptr = cpu_to_caam_dma(dma);
sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
sec4_sg_ptr->len = cpu_to_caam32(len);
sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
#ifdef DEBUG
......
......@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
ccp-objs := ccp-dev.o \
ccp-ops.o \
ccp-dev-v3.o \
ccp-dev-v5.o \
ccp-platform.o \
ccp-dmaengine.o
ccp-$(CONFIG_PCI) += ccp-pci.o
......
......@@ -4,6 +4,7 @@
* Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -134,7 +135,22 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
rctx->cmd.engine = CCP_ENGINE_SHA;
rctx->cmd.u.sha.type = rctx->type;
rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
switch (rctx->type) {
case CCP_SHA_TYPE_1:
rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE;
break;
case CCP_SHA_TYPE_224:
rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE;
break;
case CCP_SHA_TYPE_256:
rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
break;
default:
/* Should never get here */
break;
}
rctx->cmd.u.sha.src = sg;
rctx->cmd.u.sha.src_len = rctx->hash_cnt;
rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
......
......@@ -4,6 +4,7 @@
* Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -19,6 +20,61 @@
#include "ccp-dev.h"
static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
int start;
struct ccp_device *ccp = cmd_q->ccp;
for (;;) {
mutex_lock(&ccp->sb_mutex);
start = (u32)bitmap_find_next_zero_area(ccp->sb,
ccp->sb_count,
ccp->sb_start,
count, 0);
if (start <= ccp->sb_count) {
bitmap_set(ccp->sb, start, count);
mutex_unlock(&ccp->sb_mutex);
break;
}
ccp->sb_avail = 0;
mutex_unlock(&ccp->sb_mutex);
/* Wait for KSB entries to become available */
if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
return 0;
}
return KSB_START + start;
}
static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
unsigned int count)
{
struct ccp_device *ccp = cmd_q->ccp;
if (!start)
return;
mutex_lock(&ccp->sb_mutex);
bitmap_clear(ccp->sb, start - KSB_START, count);
ccp->sb_avail = 1;
mutex_unlock(&ccp->sb_mutex);
wake_up_interruptible_all(&ccp->sb_queue);
}
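The ccp_alloc_ksb()/ccp_free_ksb() pair above follows a common kernel pattern: a mutex-protected bitmap hands out contiguous slots, and a caller that finds no free area sleeps on a wait queue until a free operation sets an availability flag and wakes it. A reduced, self-contained sketch of that pattern (names and sizes here are made up, not taken from the driver):

#include <linux/bitmap.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#define DEMO_SLOTS	32

static DECLARE_BITMAP(demo_map, DEMO_SLOTS);
static DEFINE_MUTEX(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_avail = 1;

static long demo_alloc(unsigned int count)
{
	unsigned long start;

	for (;;) {
		mutex_lock(&demo_lock);
		start = bitmap_find_next_zero_area(demo_map, DEMO_SLOTS,
						   0, count, 0);
		if (start + count <= DEMO_SLOTS) {
			bitmap_set(demo_map, start, count);
			mutex_unlock(&demo_lock);
			return start;
		}
		demo_avail = 0;
		mutex_unlock(&demo_lock);

		/* sleep until demo_free() signals new space */
		if (wait_event_interruptible(demo_wq, demo_avail))
			return -ERESTARTSYS;
	}
}

static void demo_free(unsigned long start, unsigned int count)
{
	mutex_lock(&demo_lock);
	bitmap_clear(demo_map, start, count);
	demo_avail = 1;
	mutex_unlock(&demo_lock);
	wake_up_interruptible_all(&demo_wq);
}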
static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
}
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
struct ccp_cmd_queue *cmd_q = op->cmd_q;
......@@ -68,6 +124,9 @@ static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
/* On error delete all related jobs from the queue */
cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
| op->jobid;
if (cmd_q->cmd_error)
ccp_log_error(cmd_q->ccp,
cmd_q->cmd_error);
iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
......@@ -99,10 +158,10 @@ static int ccp_perform_aes(struct ccp_op *op)
| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
| (op->sb_key << REQ1_KEY_KSB_SHIFT);
cr[1] = op->src.u.dma.length - 1;
cr[2] = ccp_addr_lo(&op->src.u.dma);
cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
| ccp_addr_hi(&op->src.u.dma);
cr[4] = ccp_addr_lo(&op->dst.u.dma);
......@@ -129,10 +188,10 @@ static int ccp_perform_xts_aes(struct ccp_op *op)
cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
| (op->sb_key << REQ1_KEY_KSB_SHIFT);
cr[1] = op->src.u.dma.length - 1;
cr[2] = ccp_addr_lo(&op->src.u.dma);
cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
| ccp_addr_hi(&op->src.u.dma);
cr[4] = ccp_addr_lo(&op->dst.u.dma);
......@@ -158,7 +217,7 @@ static int ccp_perform_sha(struct ccp_op *op)
| REQ1_INIT;
cr[1] = op->src.u.dma.length - 1;
cr[2] = ccp_addr_lo(&op->src.u.dma);
cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
| ccp_addr_hi(&op->src.u.dma);
......@@ -181,11 +240,11 @@ static int ccp_perform_rsa(struct ccp_op *op)
/* Fill out the register contents for REQ1 through REQ6 */
cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
| (op->ksb_key << REQ1_KEY_KSB_SHIFT)
| (op->sb_key << REQ1_KEY_KSB_SHIFT)
| REQ1_EOM;
cr[1] = op->u.rsa.input_len - 1;
cr[2] = ccp_addr_lo(&op->src.u.dma);
cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
| ccp_addr_hi(&op->src.u.dma);
cr[4] = ccp_addr_lo(&op->dst.u.dma);
......@@ -215,10 +274,10 @@ static int ccp_perform_passthru(struct ccp_op *op)
| ccp_addr_hi(&op->src.u.dma);
if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
} else {
cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
cr[2] = op->src.u.sb * CCP_SB_BYTES;
cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
}
if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
......@@ -226,8 +285,8 @@ static int ccp_perform_passthru(struct ccp_op *op)
cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
| ccp_addr_hi(&op->dst.u.dma);
} else {
cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
cr[4] = op->dst.u.sb * CCP_SB_BYTES;
cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
}
if (op->eom)
......@@ -256,35 +315,6 @@ static int ccp_perform_ecc(struct ccp_op *op)
return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
u32 trng_value;
int len = min_t(int, sizeof(trng_value), max);
/*
* Locking is provided by the caller so we can update device
* hwrng-related fields safely
*/
trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
if (!trng_value) {
/* Zero is returned if no data is available or if a
* bad-entropy error is present. Assume an error if
* we exceed TRNG_RETRIES reads of zero.
*/
if (ccp->hwrng_retries++ > TRNG_RETRIES)
return -EIO;
return 0;
}
/* Reset the counter and save the rng value */
ccp->hwrng_retries = 0;
memcpy(data, &trng_value, len);
return len;
}
static int ccp_init(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
......@@ -321,9 +351,9 @@ static int ccp_init(struct ccp_device *ccp)
cmd_q->dma_pool = dma_pool;
/* Reserve 2 KSB regions for the queue */
cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
ccp->ksb_count -= 2;
cmd_q->sb_key = KSB_START + ccp->sb_start++;
cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
ccp->sb_count -= 2;
/* Preset some register values and masks that are queue
* number dependent
......@@ -335,7 +365,7 @@ static int ccp_init(struct ccp_device *ccp)
cmd_q->int_ok = 1 << (i * 2);
cmd_q->int_err = 1 << ((i * 2) + 1);
cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
cmd_q->free_slots = ccp_get_free_slots(cmd_q);
init_waitqueue_head(&cmd_q->int_queue);
......@@ -375,9 +405,10 @@ static int ccp_init(struct ccp_device *ccp)
}
/* Initialize the queues used to wait for KSB space and suspend */
init_waitqueue_head(&ccp->ksb_queue);
init_waitqueue_head(&ccp->sb_queue);
init_waitqueue_head(&ccp->suspend_queue);
dev_dbg(dev, "Starting threads...\n");
/* Create a kthread for each queue */
for (i = 0; i < ccp->cmd_q_count; i++) {
struct task_struct *kthread;
......@@ -397,29 +428,26 @@ static int ccp_init(struct ccp_device *ccp)
wake_up_process(kthread);
}
/* Register the RNG */
ccp->hwrng.name = ccp->rngname;
ccp->hwrng.read = ccp_trng_read;
ret = hwrng_register(&ccp->hwrng);
if (ret) {
dev_err(dev, "error registering hwrng (%d)\n", ret);
dev_dbg(dev, "Enabling interrupts...\n");
/* Enable interrupts */
iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
dev_dbg(dev, "Registering device...\n");
ccp_add_device(ccp);
ret = ccp_register_rng(ccp);
if (ret)
goto e_kthread;
}
/* Register the DMA engine support */
ret = ccp_dmaengine_register(ccp);
if (ret)
goto e_hwrng;
ccp_add_device(ccp);
/* Enable interrupts */
iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
return 0;
e_hwrng:
hwrng_unregister(&ccp->hwrng);
ccp_unregister_rng(ccp);
e_kthread:
for (i = 0; i < ccp->cmd_q_count; i++)
......@@ -441,19 +469,14 @@ static void ccp_destroy(struct ccp_device *ccp)
struct ccp_cmd *cmd;
unsigned int qim, i;
/* Remove this device from the list of available units first */
ccp_del_device(ccp);
/* Unregister the DMA engine */
ccp_dmaengine_unregister(ccp);
/* Unregister the RNG */
hwrng_unregister(&ccp->hwrng);
ccp_unregister_rng(ccp);
/* Stop the queue kthreads */
for (i = 0; i < ccp->cmd_q_count; i++)
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
/* Remove this device from the list of available units */
ccp_del_device(ccp);
/* Build queue interrupt mask (two interrupt masks per queue) */
qim = 0;
......@@ -472,6 +495,11 @@ static void ccp_destroy(struct ccp_device *ccp)
}
iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
/* Stop the queue kthreads */
for (i = 0; i < ccp->cmd_q_count; i++)
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
ccp->free_irq(ccp);
for (i = 0; i < ccp->cmd_q_count; i++)
......@@ -527,18 +555,24 @@ static irqreturn_t ccp_irq_handler(int irq, void *data)
}
static const struct ccp_actions ccp3_actions = {
.perform_aes = ccp_perform_aes,
.perform_xts_aes = ccp_perform_xts_aes,
.perform_sha = ccp_perform_sha,
.perform_rsa = ccp_perform_rsa,
.perform_passthru = ccp_perform_passthru,
.perform_ecc = ccp_perform_ecc,
.aes = ccp_perform_aes,
.xts_aes = ccp_perform_xts_aes,
.sha = ccp_perform_sha,
.rsa = ccp_perform_rsa,
.passthru = ccp_perform_passthru,
.ecc = ccp_perform_ecc,
.sballoc = ccp_alloc_ksb,
.sbfree = ccp_free_ksb,
.init = ccp_init,
.destroy = ccp_destroy,
.get_free_slots = ccp_get_free_slots,
.irqhandler = ccp_irq_handler,
};
struct ccp_vdata ccpv3 = {
const struct ccp_vdata ccpv3 = {
.version = CCP_VERSION(3, 0),
.setup = NULL,
.perform = &ccp3_actions,
.bar = 2,
.offset = 0x20000,
};
This diff is collapsed.
......@@ -4,6 +4,7 @@
* Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -39,6 +40,59 @@ struct ccp_tasklet_data {
struct ccp_cmd *cmd;
};
/* Human-readable error strings */
char *ccp_error_codes[] = {
"",
"ERR 01: ILLEGAL_ENGINE",
"ERR 02: ILLEGAL_KEY_ID",
"ERR 03: ILLEGAL_FUNCTION_TYPE",
"ERR 04: ILLEGAL_FUNCTION_MODE",
"ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
"ERR 06: ILLEGAL_FUNCTION_SIZE",
"ERR 07: Zlib_MISSING_INIT_EOM",
"ERR 08: ILLEGAL_FUNCTION_RSVD",
"ERR 09: ILLEGAL_BUFFER_LENGTH",
"ERR 10: VLSB_FAULT",
"ERR 11: ILLEGAL_MEM_ADDR",
"ERR 12: ILLEGAL_MEM_SEL",
"ERR 13: ILLEGAL_CONTEXT_ID",
"ERR 14: ILLEGAL_KEY_ADDR",
"ERR 15: 0xF Reserved",
"ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
"ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
"ERR 18: CMD_TIMEOUT",
"ERR 19: IDMA0_AXI_SLVERR",
"ERR 20: IDMA0_AXI_DECERR",
"ERR 21: 0x15 Reserved",
"ERR 22: IDMA1_AXI_SLAVE_FAULT",
"ERR 23: IDMA1_AIXI_DECERR",
"ERR 24: 0x18 Reserved",
"ERR 25: ZLIBVHB_AXI_SLVERR",
"ERR 26: ZLIBVHB_AXI_DECERR",
"ERR 27: 0x1B Reserved",
"ERR 27: ZLIB_UNEXPECTED_EOM",
"ERR 27: ZLIB_EXTRA_DATA",
"ERR 30: ZLIB_BTYPE",
"ERR 31: ZLIB_UNDEFINED_SYMBOL",
"ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
"ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
"ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
"ERR 35: ZLIB_UNCOMPRESSED_LEN",
"ERR 36: ZLIB_LIMIT_REACHED",
"ERR 37: ZLIB_CHECKSUM_MISMATCH0",
"ERR 38: ODMA0_AXI_SLVERR",
"ERR 39: ODMA0_AXI_DECERR",
"ERR 40: 0x28 Reserved",
"ERR 41: ODMA1_AXI_SLVERR",
"ERR 42: ODMA1_AXI_DECERR",
"ERR 43: LSB_PARITY_ERR",
};
void ccp_log_error(struct ccp_device *d, int e)
{
dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
}
/* List of CCPs, CCP count, read-write access lock, and access functions
*
* Lock structure: get ccp_unit_lock for reading whenever we need to
......@@ -58,7 +112,7 @@ static struct ccp_device *ccp_rr;
/* Ever-increasing value to produce unique unit numbers */
static atomic_t ccp_unit_ordinal;
unsigned int ccp_increment_unit_ordinal(void)
static unsigned int ccp_increment_unit_ordinal(void)
{
return atomic_inc_return(&ccp_unit_ordinal);
}
......@@ -118,6 +172,29 @@ void ccp_del_device(struct ccp_device *ccp)
write_unlock_irqrestore(&ccp_unit_lock, flags);
}
int ccp_register_rng(struct ccp_device *ccp)
{
int ret = 0;
dev_dbg(ccp->dev, "Registering RNG...\n");
/* Register an RNG */
ccp->hwrng.name = ccp->rngname;
ccp->hwrng.read = ccp_trng_read;
ret = hwrng_register(&ccp->hwrng);
if (ret)
dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);
return ret;
}
void ccp_unregister_rng(struct ccp_device *ccp)
{
if (ccp->hwrng.name)
hwrng_unregister(&ccp->hwrng);
}
static struct ccp_device *ccp_get_device(void)
{
unsigned long flags;
......@@ -397,9 +474,9 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
spin_lock_init(&ccp->cmd_lock);
mutex_init(&ccp->req_mutex);
mutex_init(&ccp->ksb_mutex);
ccp->ksb_count = KSB_COUNT;
ccp->ksb_start = 0;
mutex_init(&ccp->sb_mutex);
ccp->sb_count = KSB_COUNT;
ccp->sb_start = 0;
ccp->ord = ccp_increment_unit_ordinal();
snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
......@@ -408,6 +485,34 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
return ccp;
}
int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
u32 trng_value;
int len = min_t(int, sizeof(trng_value), max);
/* Locking is provided by the caller so we can update device
* hwrng-related fields safely
*/
trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
if (!trng_value) {
/* Zero is returned if no data is available or if a
* bad-entropy error is present. Assume an error if
* we exceed TRNG_RETRIES reads of zero.
*/
if (ccp->hwrng_retries++ > TRNG_RETRIES)
return -EIO;
return 0;
}
/* Reset the counter and save the rng value */
ccp->hwrng_retries = 0;
memcpy(data, &trng_value, len);
return len;
}
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
......
This diff is collapsed.
......@@ -299,12 +299,10 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
{
struct ccp_dma_desc *desc;
desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
if (!desc)
return NULL;
memset(desc, 0, sizeof(*desc));
dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
desc->tx_desc.flags = flags;
desc->tx_desc.tx_submit = ccp_tx_submit;
......@@ -650,8 +648,11 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
"%s-dmaengine-desc-cache",
ccp->name);
if (!dma_cmd_cache_name)
return -ENOMEM;
if (!dma_desc_cache_name) {
ret = -ENOMEM;
goto err_cache;
}
ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
sizeof(struct ccp_dma_desc),
sizeof(void *),
......
This diff is collapsed.
......@@ -4,6 +4,7 @@
* Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -25,9 +26,6 @@
#include "ccp-dev.h"
#define IO_BAR 2
#define IO_OFFSET 0x20000
#define MSIX_VECTORS 2
struct ccp_msix {
......@@ -143,10 +141,11 @@ static void ccp_free_irqs(struct ccp_device *ccp)
free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
dev);
pci_disable_msix(pdev);
} else {
} else if (ccp->irq) {
free_irq(ccp->irq, dev);
pci_disable_msi(pdev);
}
ccp->irq = 0;
}
static int ccp_find_mmio_area(struct ccp_device *ccp)
......@@ -156,10 +155,11 @@ static int ccp_find_mmio_area(struct ccp_device *ccp)
resource_size_t io_len;
unsigned long io_flags;
io_flags = pci_resource_flags(pdev, IO_BAR);
io_len = pci_resource_len(pdev, IO_BAR);
if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
return IO_BAR;
io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
io_len = pci_resource_len(pdev, ccp->vdata->bar);
if ((io_flags & IORESOURCE_MEM) &&
(io_len >= (ccp->vdata->offset + 0x800)))
return ccp->vdata->bar;
return -EIO;
}
......@@ -216,7 +216,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(dev, "pci_iomap failed\n");
goto e_device;
}
ccp->io_regs = ccp->io_map + IO_OFFSET;
ccp->io_regs = ccp->io_map + ccp->vdata->offset;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {
......@@ -230,6 +230,9 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_set_drvdata(dev, ccp);
if (ccp->vdata->setup)
ccp->vdata->setup(ccp);
ret = ccp->vdata->perform->init(ccp);
if (ret)
goto e_iomap;
......@@ -322,6 +325,8 @@ static int ccp_pci_resume(struct pci_dev *pdev)
static const struct pci_device_id ccp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
/* Last entry must be zero */
{ 0, }
};
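The new table entries above attach a struct ccp_vdata to each PCI ID via driver_data. For clarity, the usual way such driver_data is consumed in probe() looks roughly like the sketch below (illustrative only; field names beyond those visible in the hunks are assumptions), after which ccp->vdata->bar, ->offset and ->perform->init() are used as shown earlier:

static int demo_ccp_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct ccp_device *ccp;

	ccp = ccp_alloc_struct(&pdev->dev);
	if (!ccp)
		return -ENOMEM;

	/* recover the per-version data placed in the match table */
	ccp->vdata = (const struct ccp_vdata *)id->driver_data;
	if (!ccp->vdata || !ccp->vdata->version) {
		dev_err(&pdev->dev, "missing driver data\n");
		return -ENODEV;
	}

	/* ... BAR mapping, IRQ setup, then ccp->vdata->perform->init(ccp) ... */
	return 0;
}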
......
......@@ -636,20 +636,12 @@ struct hifn_request_context {
static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
{
u32 ret;
ret = readl(dev->bar[0] + reg);
return ret;
return readl(dev->bar[0] + reg);
}
static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
{
u32 ret;
ret = readl(dev->bar[1] + reg);
return ret;
return readl(dev->bar[1] + reg);
}
static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
......
......@@ -71,6 +71,7 @@
#define DRIVER_FLAGS_MD5 BIT(21)
#define IMG_HASH_QUEUE_LENGTH 20
#define IMG_HASH_DMA_BURST 4
#define IMG_HASH_DMA_THRESHOLD 64
#ifdef __LITTLE_ENDIAN
......@@ -102,8 +103,10 @@ struct img_hash_request_ctx {
unsigned long op;
size_t bufcnt;
u8 buffer[0] __aligned(sizeof(u32));
struct ahash_request fallback_req;
/* Zero length buffer must remain last member of struct */
u8 buffer[0] __aligned(sizeof(u32));
};
struct img_hash_ctx {
......@@ -340,7 +343,7 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
dma_conf.direction = DMA_MEM_TO_DEV;
dma_conf.dst_addr = hdev->bus_addr;
dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_conf.dst_maxburst = 16;
dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
dma_conf.device_fc = false;
err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
......@@ -361,7 +364,7 @@ static void img_hash_dma_task(unsigned long d)
size_t nbytes, bleft, wsend, len, tbc;
struct scatterlist tsg;
if (!ctx->sg)
if (!hdev->req || !ctx->sg)
return;
addr = sg_virt(ctx->sg);
......@@ -587,6 +590,32 @@ static int img_hash_finup(struct ahash_request *req)
return crypto_ahash_finup(&rctx->fallback_req);
}
static int img_hash_import(struct ahash_request *req, const void *in)
{
struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
rctx->fallback_req.base.flags = req->base.flags
& CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_ahash_import(&rctx->fallback_req, in);
}
static int img_hash_export(struct ahash_request *req, void *out)
{
struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
rctx->fallback_req.base.flags = req->base.flags
& CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_ahash_export(&rctx->fallback_req, out);
}
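The new img_hash_import()/img_hash_export() hooks, together with the .statesize entries added to the algorithm definitions below, let users of the ahash API checkpoint and resume a partial hash. A synchronous-style usage sketch follows; it ignores -EINPROGRESS handling for async completion, the state buffer must be at least crypto_ahash_statesize() bytes, and the names are illustrative rather than taken from the driver:

static int demo_split_hash(struct ahash_request *req,
			   struct scatterlist *part1, unsigned int len1,
			   struct scatterlist *part2, unsigned int len2,
			   void *state, u8 *digest)
{
	int ret;

	/* hash the first chunk */
	ahash_request_set_crypt(req, part1, NULL, len1);
	ret = crypto_ahash_init(req);
	if (!ret)
		ret = crypto_ahash_update(req);
	if (ret)
		return ret;

	/* checkpoint the partial state ... */
	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	/* ... and resume later, possibly from a fresh request */
	ret = crypto_ahash_import(req, state);
	if (ret)
		return ret;

	/* finish with the second chunk and write the digest */
	ahash_request_set_crypt(req, part2, digest, len2);
	return crypto_ahash_finup(req);
}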
static int img_hash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
......@@ -643,10 +672,9 @@ static int img_hash_digest(struct ahash_request *req)
return err;
}
static int img_hash_cra_init(struct crypto_tfm *tfm)
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
const char *alg_name = crypto_tfm_alg_name(tfm);
int err = -ENOMEM;
ctx->fallback = crypto_alloc_ahash(alg_name, 0,
......@@ -658,6 +686,7 @@ static int img_hash_cra_init(struct crypto_tfm *tfm)
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct img_hash_request_ctx) +
crypto_ahash_reqsize(ctx->fallback) +
IMG_HASH_DMA_THRESHOLD);
return 0;
......@@ -666,6 +695,26 @@ static int img_hash_cra_init(struct crypto_tfm *tfm)
return err;
}
static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
return img_hash_cra_init(tfm, "md5-generic");
}
static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
return img_hash_cra_init(tfm, "sha1-generic");
}
static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
return img_hash_cra_init(tfm, "sha224-generic");
}
static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
return img_hash_cra_init(tfm, "sha256-generic");
}
static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);
......@@ -711,9 +760,12 @@ static struct ahash_alg img_algs[] = {
.update = img_hash_update,
.final = img_hash_final,
.finup = img_hash_finup,
.export = img_hash_export,
.import = img_hash_import,
.digest = img_hash_digest,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name = "img-md5",
......@@ -723,7 +775,7 @@ static struct ahash_alg img_algs[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct img_hash_ctx),
.cra_init = img_hash_cra_init,
.cra_init = img_hash_cra_md5_init,
.cra_exit = img_hash_cra_exit,
.cra_module = THIS_MODULE,
}
......@@ -734,9 +786,12 @@ static struct ahash_alg img_algs[] = {
.update = img_hash_update,
.final = img_hash_final,
.finup = img_hash_finup,
.export = img_hash_export,
.import = img_hash_import,
.digest = img_hash_digest,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "img-sha1",
......@@ -746,7 +801,7 @@ static struct ahash_alg img_algs[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct img_hash_ctx),
.cra_init = img_hash_cra_init,
.cra_init = img_hash_cra_sha1_init,
.cra_exit = img_hash_cra_exit,
.cra_module = THIS_MODULE,
}
......@@ -757,9 +812,12 @@ static struct ahash_alg img_algs[] = {
.update = img_hash_update,
.final = img_hash_final,
.finup = img_hash_finup,
.export = img_hash_export,
.import = img_hash_import,
.digest = img_hash_digest,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "img-sha224",
......@@ -769,7 +827,7 @@ static struct ahash_alg img_algs[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct img_hash_ctx),
.cra_init = img_hash_cra_init,
.cra_init = img_hash_cra_sha224_init,
.cra_exit = img_hash_cra_exit,
.cra_module = THIS_MODULE,
}
......@@ -780,9 +838,12 @@ static struct ahash_alg img_algs[] = {
.update = img_hash_update,
.final = img_hash_final,
.finup = img_hash_finup,
.export = img_hash_export,
.import = img_hash_import,
.digest = img_hash_digest,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "img-sha256",
......@@ -792,7 +853,7 @@ static struct ahash_alg img_algs[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct img_hash_ctx),
.cra_init = img_hash_cra_init,
.cra_init = img_hash_cra_sha256_init,
.cra_exit = img_hash_cra_exit,
.cra_module = THIS_MODULE,
}
......@@ -971,7 +1032,7 @@ static int img_hash_probe(struct platform_device *pdev)
err = img_register_algs(hdev);
if (err)
goto err_algs;
dev_dbg(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
return 0;
......@@ -1013,11 +1074,38 @@ static int img_hash_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
struct img_hash_dev *hdev = dev_get_drvdata(dev);
clk_disable_unprepare(hdev->hash_clk);
clk_disable_unprepare(hdev->sys_clk);
return 0;
}
static int img_hash_resume(struct device *dev)
{
struct img_hash_dev *hdev = dev_get_drvdata(dev);
clk_prepare_enable(hdev->hash_clk);
clk_prepare_enable(hdev->sys_clk);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops img_hash_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};
static struct platform_driver img_hash_driver = {
.probe = img_hash_probe,
.remove = img_hash_remove,
.driver = {
.name = "img-hash-accelerator",
.pm = &img_hash_pm_ops,
.of_match_table = of_match_ptr(img_hash_match),
}
};
......
......@@ -447,9 +447,8 @@ static int init_ixp_crypto(struct device *dev)
if (!npe_running(npe_c)) {
ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
if (ret) {
return ret;
}
if (ret)
goto npe_release;
if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
goto npe_error;
} else {
......@@ -473,7 +472,8 @@ static int init_ixp_crypto(struct device *dev)
default:
printk(KERN_ERR "Firmware of %s lacks crypto support\n",
npe_name(npe_c));
return -ENODEV;
ret = -ENODEV;
goto npe_release;
}
/* buffer_pool will also be used to sometimes store the hmac,
* so assure it is large enough
......@@ -512,6 +512,7 @@ static int init_ixp_crypto(struct device *dev)
err:
dma_pool_destroy(ctx_pool);
dma_pool_destroy(buffer_pool);
npe_release:
npe_release(npe_c);
return ret;
}
......
......@@ -166,6 +166,7 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
if (!req)
break;
ctx = crypto_tfm_ctx(req->tfm);
mv_cesa_complete_req(ctx, req, 0);
}
}
......
This diff is collapsed.
......@@ -261,6 +261,7 @@ struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
tdma->op = op;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src = cpu_to_le32(dma_handle);
tdma->dst = CESA_SA_CFG_SRAM_OFFSET;
tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;
return op;
......
This diff is collapsed. (13 additional collapsed file diffs omitted.)