while (total > 0) {
size = min(total, PAGE_SIZE);
ret = copy_from_user((void *)xbuf[0],
- user_ptr(crypt_req->plaintext), size);
+ (void __user *)crypt_req->plaintext, size);
if (ret) {
ret = -EFAULT;
pr_debug("%s: copy_from_user failed (%d)\n", __func__, ret);
goto process_req_buf_out;
}
- ret = copy_to_user(user_ptr(crypt_req->result),
+ ret = copy_to_user((void __user *)crypt_req->result,
(const void *)xbuf[1], size);
if (ret) {
ret = -EFAULT;
}
total -= size;
- /* no need to used user_ptr, we are incrementing the
- ptr value in their respective variable*/
crypt_req->result += size;
crypt_req->plaintext += size;
}
hash_buff = xbuf[0];
- memcpy(hash_buff, (char *)user_ptr(rsa_req->message), rsa_req->msg_len);
+ memcpy(hash_buff, rsa_req->message, rsa_req->msg_len);
sg_init_one(&sg[0], hash_buff, rsa_req->msg_len);
if (!rsa_req->skip_key) {
ret = crypto_ahash_setkey(tfm,
- user_ptr(rsa_req->key), rsa_req->keylen);
+ rsa_req->key, rsa_req->keylen);
if (ret) {
pr_err("alg: hash: setkey failed\n");
goto rsa_fail;
}
- ret = copy_to_user(user_ptr(rsa_req->result), (const void *)result,
+ ret = copy_to_user((void __user *)rsa_req->result, (const void *)result,
crypto_ahash_digestsize(tfm));
if (ret) {
ret = -EFAULT;
unsigned long *xbuf[XBUFSIZE];
int ret = -ENOMEM;
- tfm = crypto_alloc_ahash(user_ptr(sha_req->algo), 0, 0);
+ tfm = crypto_alloc_ahash(sha_req->algo, 0, 0);
if (IS_ERR(tfm)) {
pr_err("alg:hash:Failed to load transform for %s:%ld\n",
- (char *)user_ptr(sha_req->algo), PTR_ERR(tfm));
+ sha_req->algo, PTR_ERR(tfm));
goto out_alloc;
}
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("alg:hash:Failed to allocate request for %s\n",
- (char *)user_ptr(sha_req->algo));
+ sha_req->algo);
goto out_noreq;
}
hash_buff = xbuf[0];
- memcpy(hash_buff, user_ptr(sha_req->plaintext), sha_req->plaintext_sz);
+ memcpy(hash_buff, sha_req->plaintext, sha_req->plaintext_sz);
sg_init_one(&sg[0], hash_buff, sha_req->plaintext_sz);
if (sha_req->keylen) {
sha_req->keylen);
if (ret) {
pr_err("alg:hash:setkey failed on %s:ret=%d\n",
- (char *)user_ptr(sha_req->algo), ret);
+ sha_req->algo, ret);
goto out;
}
ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_init(req));
if (ret) {
pr_err("alg: hash: init failed for %s: ret=%d\n",
- (char *)user_ptr(sha_req->algo), ret);
+ sha_req->algo, ret);
goto out;
}
ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_update(req));
if (ret) {
pr_err("alg: hash: update failed for %s: ret=%d\n",
- (char *)user_ptr(sha_req->algo), ret);
+ sha_req->algo, ret);
goto out;
}
ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_final(req));
if (ret) {
pr_err("alg: hash: final failed for %s: ret=%d\n",
- (char *)user_ptr(sha_req->algo), ret);
+ sha_req->algo, ret);
goto out;
}
- ret = copy_to_user(user_ptr(sha_req->result),
+ ret = copy_to_user((void __user *)sha_req->result,
(const void *)result, crypto_ahash_digestsize(tfm));
if (ret) {
ret = -EFAULT;
pr_err("alg: hash: copy_to_user failed (%d) for %s\n",
- ret, (char *)user_ptr(sha_req->algo));
+ ret, sha_req->algo);
}
out:
struct tegra_rng_req rng_req;
struct tegra_sha_req sha_req;
struct tegra_rsa_req rsa_req;
+#ifdef CONFIG_COMPAT
+ struct tegra_crypt_req_32 crypt_req_32;
+ struct tegra_rng_req_32 rng_req_32;
+ struct tegra_sha_req_32 sha_req_32;
+ struct tegra_rsa_req_32 rsa_req_32;
+ int i = 0;
+#endif
char *rng;
int ret = 0;
ctx->use_ssk = (int)arg;
break;
+#ifdef CONFIG_COMPAT
+ case TEGRA_CRYPTO_IOCTL_PROCESS_REQ_32:
+ ret = copy_from_user(&crypt_req_32, (void __user *)arg,
+ sizeof(crypt_req_32));
+ /* fail early on a bad user pointer instead of processing junk */
+ if (ret) {
+ ret = -EFAULT;
+ pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
+ return ret;
+ }
+
+ /* keylen is user-controlled: bound it before copying into the
+  * fixed-size crypt_req.key[TEGRA_CRYPTO_MAX_KEY_SIZE] array */
+ if (crypt_req_32.keylen < 0 ||
+ crypt_req_32.keylen > TEGRA_CRYPTO_MAX_KEY_SIZE) {
+ ret = -EINVAL;
+ pr_err("%s: invalid keylen %d\n", __func__,
+ crypt_req_32.keylen);
+ return ret;
+ }
+
+ crypt_req.op = crypt_req_32.op;
+ crypt_req.encrypt = crypt_req_32.encrypt;
+ crypt_req.skip_key = crypt_req_32.skip_key;
+ crypt_req.skip_iv = crypt_req_32.skip_iv;
+ for (i = 0; i < crypt_req_32.keylen; i++)
+ crypt_req.key[i] = crypt_req_32.key[i];
+ crypt_req.keylen = crypt_req_32.keylen;
+ for (i = 0; i < TEGRA_CRYPTO_IV_SIZE; i++)
+ crypt_req.iv[i] = crypt_req_32.iv[i];
+ crypt_req.ivlen = crypt_req_32.ivlen;
+ /* widen the 32-bit user addresses back into kernel pointers */
+ crypt_req.plaintext =
+ (u8 *)(void __user *)(__u64)(crypt_req_32.plaintext);
+ crypt_req.plaintext_sz = crypt_req_32.plaintext_sz;
+ crypt_req.result =
+ (u8 *)(void __user *)(__u64)(crypt_req_32.result);
+
+ ret = process_crypt_req(ctx, &crypt_req);
+ break;
+#endif
+
case TEGRA_CRYPTO_IOCTL_PROCESS_REQ:
ret = copy_from_user(&crypt_req, (void __user *)arg,
sizeof(crypt_req));
ret = process_crypt_req(ctx, &crypt_req);
break;
- case TEGRA_CRYPTO_IOCTL_SET_SEED:
- if (copy_from_user(&rng_req, (void __user *)arg,
- sizeof(rng_req))) {
+#ifdef CONFIG_COMPAT
+ case TEGRA_CRYPTO_IOCTL_SET_SEED_32:
+ if (copy_from_user(&rng_req_32, (void __user *)arg,
+ sizeof(rng_req_32))) {
ret = -EFAULT;
pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
return ret;
}
+ for (i = 0; i < TEGRA_CRYPTO_RNG_SEED_SIZE; i++)
+ rng_req.seed[i] = rng_req_32.seed[i];
+ rng_req.type = rng_req_32.type;
+ /* fall through */
+#endif
+
+ case TEGRA_CRYPTO_IOCTL_SET_SEED:
+ if (ioctl_num == TEGRA_CRYPTO_IOCTL_SET_SEED) {
+ if (copy_from_user(&rng_req, (void __user *)arg,
+ sizeof(rng_req))) {
+ ret = -EFAULT;
+ pr_err("%s: copy_from_user fail(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
memcpy(ctx->seed, rng_req.seed, TEGRA_CRYPTO_RNG_SEED_SIZE);
if (rng_req.type == RNG_DRBG)
crypto_rng_seedsize(ctx->rng));
break;
- case TEGRA_CRYPTO_IOCTL_GET_RANDOM:
- if (copy_from_user(&rng_req, (void __user *)arg,
- sizeof(rng_req))) {
+#ifdef CONFIG_COMPAT
+ case TEGRA_CRYPTO_IOCTL_GET_RANDOM_32:
+ if (copy_from_user(&rng_req_32, (void __user *)arg,
+ sizeof(rng_req_32))) {
ret = -EFAULT;
pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
return ret;
}
+ rng_req.nbytes = rng_req_32.nbytes;
+ rng_req.type = rng_req_32.type;
+ rng_req.rdata = (u8 *)(void __user *)(__u64)rng_req_32.rdata;
+ /* fall through */
+#endif
+
+ case TEGRA_CRYPTO_IOCTL_GET_RANDOM:
+ if (ioctl_num == TEGRA_CRYPTO_IOCTL_GET_RANDOM) {
+ if (copy_from_user(&rng_req, (void __user *)arg,
+ sizeof(rng_req))) {
+ ret = -EFAULT;
+ pr_err("%s: copy_from_user fail(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
rng = kzalloc(rng_req.nbytes, GFP_KERNEL);
if (!rng) {
if (rng_req.type == RNG_DRBG)
goto rng_out;
}
- ret = copy_to_user(user_ptr(rng_req.rdata),
+ ret = copy_to_user((void __user *)rng_req.rdata,
(const void *)rng, rng_req.nbytes);
if (ret) {
ret = -EFAULT;
kfree(rng);
break;
+#ifdef CONFIG_COMPAT
+ case TEGRA_CRYPTO_IOCTL_GET_SHA_32:
+ ret = copy_from_user(&sha_req_32, (void __user *)arg,
+ sizeof(sha_req_32));
+ /* fail early on a bad user pointer instead of processing junk */
+ if (ret) {
+ ret = -EFAULT;
+ pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
+ return ret;
+ }
+
+ /* keylen is user-controlled: bound it before copying into the
+  * fixed-size sha_req.key[TEGRA_CRYPTO_MAX_KEY_SIZE] array */
+ if (sha_req_32.keylen < 0 ||
+ sha_req_32.keylen > TEGRA_CRYPTO_MAX_KEY_SIZE) {
+ ret = -EINVAL;
+ pr_err("%s: invalid keylen %d\n", __func__,
+ sha_req_32.keylen);
+ return ret;
+ }
+
+ for (i = 0; i < sha_req_32.keylen; i++)
+ sha_req.key[i] = sha_req_32.key[i];
+ sha_req.keylen = sha_req_32.keylen;
+ /* widen the 32-bit user addresses back into kernel pointers */
+ sha_req.algo =
+ (unsigned char *)(void __user *)(__u64)(sha_req_32.algo);
+ sha_req.plaintext =
+ (unsigned char *)(void __user *)(__u64)(sha_req_32.plaintext);
+ sha_req.plaintext_sz = sha_req_32.plaintext_sz;
+ sha_req.result =
+ (unsigned char *)(void __user *)(__u64)(sha_req_32.result);
+
+ ret = tegra_crypto_sha(&sha_req);
+ break;
+#endif
+
case TEGRA_CRYPTO_IOCTL_GET_SHA:
if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2) {
if (copy_from_user(&sha_req, (void __user *)arg,
}
break;
+#ifdef CONFIG_COMPAT
+ case TEGRA_CRYPTO_IOCTL_RSA_REQ_32:
+ if (copy_from_user(&rsa_req_32, (void __user *)arg,
+ sizeof(rsa_req_32))) {
+ ret = -EFAULT;
+ pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
+ return ret;
+ }
+
+ rsa_req.keylen = rsa_req_32.keylen;
+ rsa_req.algo = rsa_req_32.algo;
+ rsa_req.modlen = rsa_req_32.modlen;
+ rsa_req.pub_explen = rsa_req_32.pub_explen;
+ rsa_req.prv_explen = rsa_req_32.prv_explen;
+ /* widen the 32-bit user addresses back into kernel pointers */
+ rsa_req.key = (char *)(void __user *)(__u64)(rsa_req_32.key);
+ rsa_req.message =
+ (char *)(void __user *)(__u64)(rsa_req_32.message);
+ rsa_req.msg_len = rsa_req_32.msg_len;
+ rsa_req.result =
+ (char *)(void __user *)(__u64)(rsa_req_32.result);
+ rsa_req.skip_key = rsa_req_32.skip_key;
+
+ /* NOTE(review): msg_len and keylen are user-controlled and not
+  * range-checked here; tegra_crypt_rsa() memcpys msg_len bytes into
+  * a fixed page buffer — confirm it validates these, or add checks. */
+ ret = tegra_crypt_rsa(ctx, &rsa_req);
+ break;
+#endif
+
case TEGRA_CRYPTO_IOCTL_RSA_REQ:
if (copy_from_user(&rsa_req, (void __user *)arg,
sizeof(rsa_req))) {
.open = tegra_crypto_dev_open,
.release = tegra_crypto_dev_release,
.unlocked_ioctl = tegra_crypto_dev_ioctl,
+#ifdef CONFIG_COMPAT
.compat_ioctl = tegra_crypto_dev_ioctl,
+#endif
};
struct miscdevice tegra_crypto_device = {
#include <asm-generic/ioctl.h>
-#ifdef CONFIG_COMPAT
-#define user_ptr(p) ((void __user *)(__u64)(p))
-#else
-#define user_ptr(p) (p)
-#endif
-
/* ioctl arg = 1 if you want to use ssk. arg = 0 to use normal key */
-#define TEGRA_CRYPTO_IOCTL_NEED_SSK _IOWR(0x98, 100, int)
-#define TEGRA_CRYPTO_IOCTL_PROCESS_REQ _IOWR(0x98, 101, int*)
-#define TEGRA_CRYPTO_IOCTL_SET_SEED _IOWR(0x98, 102, int*)
-#define TEGRA_CRYPTO_IOCTL_GET_RANDOM _IOWR(0x98, 103, int*)
-#define TEGRA_CRYPTO_IOCTL_GET_SHA _IOWR(0x98, 104, int*)
-#define TEGRA_CRYPTO_IOCTL_RSA_REQ _IOWR(0x98, 105, int*)
+#define TEGRA_CRYPTO_IOCTL_NEED_SSK _IOWR(0x98, 100, int)
#define TEGRA_CRYPTO_MAX_KEY_SIZE AES_MAX_KEY_SIZE
#define RSA_KEY_SIZE 512
int keylen;
char iv[TEGRA_CRYPTO_IV_SIZE];
int ivlen;
-#ifdef CONFIG_COMPAT
- u32 plaintext;
- u32 result;
-#else
u8 *plaintext;
u8 *result;
-#endif
int plaintext_sz;
int skip_key;
int skip_iv;
};
+#define TEGRA_CRYPTO_IOCTL_PROCESS_REQ \
+ _IOWR(0x98, 101, struct tegra_crypt_req)
+
+#ifdef CONFIG_COMPAT
+struct tegra_crypt_req_32 {
+ int op; /* e.g. TEGRA_CRYPTO_ECB */
+ bool encrypt;
+ char key[TEGRA_CRYPTO_MAX_KEY_SIZE];
+ int keylen;
+ char iv[TEGRA_CRYPTO_IV_SIZE];
+ int ivlen;
+ __u32 plaintext;
+ __u32 result;
+ int plaintext_sz;
+ int skip_key;
+ int skip_iv;
+};
+#define TEGRA_CRYPTO_IOCTL_PROCESS_REQ_32 \
+ _IOWR(0x98, 101, struct tegra_crypt_req_32)
+#endif
/* pointer to this struct should be passed to:
* TEGRA_CRYPTO_IOCTL_SET_SEED
*/
struct tegra_rng_req {
u8 seed[TEGRA_CRYPTO_RNG_SEED_SIZE];
-#ifdef CONFIG_COMPAT
- u32 rdata; /* random generated data */
-#else
u8 *rdata; /* random generated data */
-#endif
int nbytes; /* random data length */
int type;
};
+#define TEGRA_CRYPTO_IOCTL_SET_SEED \
+ _IOWR(0x98, 102, struct tegra_rng_req)
+#define TEGRA_CRYPTO_IOCTL_GET_RANDOM \
+ _IOWR(0x98, 103, struct tegra_rng_req)
-struct tegra_rsa_req {
#ifdef CONFIG_COMPAT
- u32 key;
- u32 message;
- u32 result;
-#else
+struct tegra_rng_req_32 {
+ u8 seed[TEGRA_CRYPTO_RNG_SEED_SIZE];
+ __u32 rdata; /* random generated data */
+ int nbytes; /* random data length */
+ int type;
+};
+#define TEGRA_CRYPTO_IOCTL_SET_SEED_32 \
+ _IOWR(0x98, 102, struct tegra_rng_req_32)
+#define TEGRA_CRYPTO_IOCTL_GET_RANDOM_32 \
+ _IOWR(0x98, 103, struct tegra_rng_req_32)
+#endif
+
+struct tegra_rsa_req {
char *key;
char *message;
char *result;
-#endif
int algo;
int keylen;
int msg_len;
int prv_explen;
int skip_key;
};
+#define TEGRA_CRYPTO_IOCTL_RSA_REQ \
+ _IOWR(0x98, 105, struct tegra_rsa_req)
+
+#ifdef CONFIG_COMPAT
+struct tegra_rsa_req_32 {
+ __u32 key;
+ __u32 message;
+ __u32 result;
+ int algo;
+ int keylen;
+ int msg_len;
+ int modlen;
+ int pub_explen;
+ int prv_explen;
+ int skip_key;
+};
+#define TEGRA_CRYPTO_IOCTL_RSA_REQ_32 \
+ _IOWR(0x98, 105, struct tegra_rsa_req_32)
+#endif
struct tegra_sha_req {
char key[TEGRA_CRYPTO_MAX_KEY_SIZE];
int keylen;
-#ifdef CONFIG_COMPAT
- u32 algo;
- u32 plaintext;
- u32 result;
-#else
unsigned char *algo;
unsigned char *plaintext;
unsigned char *result;
-#endif
int plaintext_sz;
};
+#define TEGRA_CRYPTO_IOCTL_GET_SHA \
+ _IOWR(0x98, 104, struct tegra_sha_req)
+
+#ifdef CONFIG_COMPAT
+struct tegra_sha_req_32 {
+ char key[TEGRA_CRYPTO_MAX_KEY_SIZE];
+ int keylen;
+ __u32 algo;
+ __u32 plaintext;
+ __u32 result;
+ int plaintext_sz;
+};
+#define TEGRA_CRYPTO_IOCTL_GET_SHA_32 \
+ _IOWR(0x98, 104, struct tegra_sha_req_32)
+#endif
#endif