/*
 * Quick & dirty crypto testing module.
 *
 * This will only exist until we have a better testing mechanism
 * (e.g. a char device).
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) 2007 Nokia Siemens Networks
 *
 * Updated RFC4106 AES-GCM testing.
 *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include "tcrypt.h"

/*
 * Need slab memory for testing (size in number of pages).
 */
#define TVMEMSIZE	4

/*
 * Used by test_cipher_speed()
 */
#define ENCRYPT 1
#define DECRYPT 0

#define MAX_DIGEST_SIZE		64

/*
 * return a string with the driver name
 */
#define get_driver_name(tfm_type, tfm) crypto_tfm_alg_driver_name(tfm_type ## _tfm(tfm))
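/*
 * For example, get_driver_name(crypto_aead, tfm) expands to
 * crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)).
 */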

/*
 * Used by test_cipher_speed()
 */
static unsigned int sec;

static char *alg = NULL;
static u32 type;
static u32 mask;
static int mode;
static u32 num_mb = 8;
static char *tvmem[TVMEMSIZE];
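
/*
 * Typical usage (illustrative values): the module runs the selected
 * tests at insmod time and then intentionally fails to load, e.g.
 *
 *	modprobe tcrypt mode=200 sec=1
 *
 * "mode" selects the test case (200 is the AES cipher speed test) and
 * "sec" the measurement interval in seconds; sec=0 measures in CPU
 * cycles instead.
 */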

static char *check[] = {
	"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
	"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
	"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
	"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
	"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
	"lzo", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
	"streebog256", "streebog512",
	NULL
};

static u32 block_sizes[] = { 16, 64, 256, 1024, 1472, 8192, 0 };
static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
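
/*
 * Both size lists above are zero-terminated; the speed-test loops walk
 * them until they hit the 0 entry.
 */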

#define XBUFSIZE 8
#define MAX_IVLEN 32

static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
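	/*
	 * Allocate XBUFSIZE single pages; on failure free whatever was
	 * already allocated and report -ENOMEM.
	 */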
	int i;

	for (i = 0; i < XBUFSIZE; i++) {
		buf[i] = (void *)__get_free_page(GFP_KERNEL);
		if (!buf[i])
			goto err_free_buf;
	}

	return 0;

err_free_buf:
	while (i-- > 0)
		free_page((unsigned long)buf[i]);

	return -ENOMEM;
}

static void testmgr_free_buf(char *buf[XBUFSIZE])
{
	int i;

	for (i = 0; i < XBUFSIZE; i++)
		free_page((unsigned long)buf[i]);
}

static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
			 unsigned int buflen, const void *assoc,
			 unsigned int aad_size)
{
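	/*
	 * sg[0] carries the associated data; the payload is spread over
	 * up to XBUFSIZE pages behind it, with a trailing partial page
	 * when buflen is not page-aligned.  Anything beyond
	 * XBUFSIZE * PAGE_SIZE is silently capped.
	 */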
	int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
	int k, rem;

	if (np > XBUFSIZE) {
		rem = PAGE_SIZE;
		np = XBUFSIZE;
	} else {
		rem = buflen % PAGE_SIZE;
	}

	sg_init_table(sg, np + 1);

	sg_set_buf(&sg[0], assoc, aad_size);

	if (rem)
		np--;
	for (k = 0; k < np; k++)
		sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);

	if (rem)
		sg_set_buf(&sg[k + 1], xbuf[k], rem);
}

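/*
 * Finish one request: crypto_wait_req() turns -EINPROGRESS/-EBUSY from
 * an async implementation into a sleep until the completion fires.
 */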
static inline int do_one_aead_op(struct aead_request *req, int ret)
{
	struct crypto_wait *wait = req->base.data;

	return crypto_wait_req(ret, wait);
}

struct test_mb_aead_data {
	struct scatterlist sg[XBUFSIZE];
	struct scatterlist sgout[XBUFSIZE];
	struct aead_request *req;
	struct crypto_wait wait;
	char *xbuf[XBUFSIZE];
	char *xoutbuf[XBUFSIZE];
	char *axbuf[XBUFSIZE];
};

static int do_mult_aead_op(struct test_mb_aead_data *data, int enc,
				u32 num_mb, int *rc)
{
	int i, err = 0;

	/* Fire up a bunch of concurrent requests */
	for (i = 0; i < num_mb; i++) {
		if (enc == ENCRYPT)
			rc[i] = crypto_aead_encrypt(data[i].req);
		else
			rc[i] = crypto_aead_decrypt(data[i].req);
	}

	/* Wait for all requests to finish */
	for (i = 0; i < num_mb; i++) {
		rc[i] = crypto_wait_req(rc[i], &data[i].wait);

		if (rc[i]) {
			pr_info("concurrent request %d error %d\n", i, rc[i]);
			err = rc[i];
		}
	}

	return err;
}

static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
				int blen, int secs, u32 num_mb)
{
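	/*
	 * Fire batches of num_mb concurrent requests for roughly "secs"
	 * seconds and report the aggregate throughput.
	 */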
	unsigned long start, end;
	int bcount;
	int ret = 0;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		ret = do_mult_aead_op(data, enc, num_mb, rc);
		if (ret)
			goto out;
	}

	pr_cont("%d operations in %d seconds (%ld bytes)\n",
		bcount * num_mb, secs, (long)bcount * blen * num_mb);

out:
	kfree(rc);
	return ret;
}

static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
			       int blen, u32 num_mb)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_mult_aead_op(data, enc, num_mb, rc);
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		ret = do_mult_aead_op(data, enc, num_mb, rc);
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

	pr_cont("1 operation in %lu cycles (%d bytes)\n",
		(cycles + 4) / (8 * num_mb), blen);

out:
	kfree(rc);
	return ret;
}

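/*
 * Multibuffer AEAD speed test: num_mb identical requests are kept in
 * flight for each (key size, block size) combination.  For decryption,
 * a valid auth tag is generated first by running the encryption path
 * once with source and destination swapped.
 */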
static void test_mb_aead_speed(const char *algo, int enc, int secs,
			       struct aead_speed_template *template,
			       unsigned int tcount, u8 authsize,
			       unsigned int aad_size, u8 *keysize, u32 num_mb)
{
	struct test_mb_aead_data *data;
	struct crypto_aead *tfm;
	unsigned int i, j, iv_len;
	const char *key;
	const char *e;
	void *assoc;
	u32 *b_size;
	char *iv;
	int ret;
	if (aad_size >= PAGE_SIZE) {
		pr_err("associate data length (%u) too big\n", aad_size);
		return;
	}

	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
	if (!iv)
		return;

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_free_iv;

	tfm = crypto_alloc_aead(algo, 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
			algo, PTR_ERR(tfm));
		goto out_free_data;
	}

	ret = crypto_aead_setauthsize(tfm, authsize);
	if (ret) {
		pr_err("alg: aead: Failed to setauthsize for %s\n", algo);
		goto out_free_tfm;
	}

	for (i = 0; i < num_mb; ++i)
		if (testmgr_alloc_buf(data[i].xbuf)) {
			while (i--)
				testmgr_free_buf(data[i].xbuf);
			goto out_free_tfm;
		}

	for (i = 0; i < num_mb; ++i)
		if (testmgr_alloc_buf(data[i].axbuf)) {
			while (i--)
				testmgr_free_buf(data[i].axbuf);
			goto out_free_xbuf;
		}

	for (i = 0; i < num_mb; ++i)
		if (testmgr_alloc_buf(data[i].xoutbuf)) {
			while (i--)
				testmgr_free_buf(data[i].xoutbuf);
			goto out_free_axbuf;
		}

	for (i = 0; i < num_mb; ++i) {
		data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
		if (!data[i].req) {
			pr_err("alg: skcipher: Failed to allocate request for %s\n",
			       algo);
			while (i--)
				aead_request_free(data[i].req);
			goto out_free_xoutbuf;
		}
	}

	for (i = 0; i < num_mb; ++i) {
		crypto_init_wait(&data[i].wait);
		aead_request_set_callback(data[i].req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &data[i].wait);
	}

	pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
		get_driver_name(crypto_aead, tfm), e);

	i = 0;
	do {
		b_size = aead_sizes;
		do {
			if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for buffer (%lu)\n",
				       authsize + *b_size,
				       XBUFSIZE * PAGE_SIZE);
				goto out;
			}

			pr_info("test %u (%d bit key, %d byte blocks): ", i,
				*keysize * 8, *b_size);

			/* Set up tfm global state, i.e. the key */

			memset(tvmem[0], 0xff, PAGE_SIZE);
			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}

			crypto_aead_clear_flags(tfm, ~0);

			ret = crypto_aead_setkey(tfm, key, *keysize);
			if (ret) {
				pr_err("setkey() failed flags=%x\n",
				       crypto_aead_get_flags(tfm));
				goto out;
			}

			iv_len = crypto_aead_ivsize(tfm);
			if (iv_len)
				memset(iv, 0xff, iv_len);

			/* Now setup per request stuff, i.e. buffers */

			for (j = 0; j < num_mb; ++j) {
				struct test_mb_aead_data *cur = &data[j];

				assoc = cur->axbuf[0];
				memset(assoc, 0xff, aad_size);

				sg_init_aead(cur->sg, cur->xbuf,
					     *b_size + (enc ? 0 : authsize),
					     assoc, aad_size);

				sg_init_aead(cur->sgout, cur->xoutbuf,
					     *b_size + (enc ? authsize : 0),
					     assoc, aad_size);

				aead_request_set_ad(cur->req, aad_size);

				if (!enc) {

					aead_request_set_crypt(cur->req,
							       cur->sgout,
							       cur->sg,
							       *b_size, iv);
					ret = crypto_aead_encrypt(cur->req);
					ret = do_one_aead_op(cur->req, ret);

					if (ret) {
						pr_err("calculating auth failed failed (%d)\n",
						       ret);
						break;
					}
				}

				aead_request_set_crypt(cur->req, cur->sg,
						       cur->sgout, *b_size +
						       (enc ? 0 : authsize),
						       iv);

			}

			if (secs) {
				ret = test_mb_aead_jiffies(data, enc, *b_size,
							   secs, num_mb);
				cond_resched();
			} else {
				ret = test_mb_aead_cycles(data, enc, *b_size,
							  num_mb);
			}

			if (ret) {
				pr_err("%s() failed return code=%d\n", e, ret);
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out:
	for (i = 0; i < num_mb; ++i)
		aead_request_free(data[i].req);
out_free_xoutbuf:
	for (i = 0; i < num_mb; ++i)
		testmgr_free_buf(data[i].xoutbuf);
out_free_axbuf:
	for (i = 0; i < num_mb; ++i)
		testmgr_free_buf(data[i].axbuf);
out_free_xbuf:
	for (i = 0; i < num_mb; ++i)
		testmgr_free_buf(data[i].xbuf);
out_free_tfm:
	crypto_free_aead(tfm);
out_free_data:
	kfree(data);
out_free_iv:
	kfree(iv);
}

static int test_aead_jiffies(struct aead_request *req, int enc,
				int blen, int secs)
{
	unsigned long start, end;
	int bcount;
	int ret;

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		if (enc)
			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
		else
			ret = do_one_aead_op(req, crypto_aead_decrypt(req));

		if (ret)
			return ret;
	}

	printk("%d operations in %d seconds (%ld bytes)\n",
	       bcount, secs, (long)bcount * blen);
	return 0;
}

static int test_aead_cycles(struct aead_request *req, int enc, int blen)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		if (enc)
			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
		else
			ret = do_one_aead_op(req, crypto_aead_decrypt(req));

		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		if (enc)
			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
		else
			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

out:
	if (ret == 0)
		printk("1 operation in %lu cycles (%d bytes)\n",
		       (cycles + 4) / 8, blen);

	return ret;
}

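/*
 * Single-request AEAD speed test over the block sizes in aead_sizes[].
 */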
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
			    struct aead_speed_template *template,
			    unsigned int tcount, u8 authsize,
			    unsigned int aad_size, u8 *keysize)
{
	unsigned int i, j;
	struct crypto_aead *tfm;
	int ret = -ENOMEM;
	const char *key;
	struct aead_request *req;
	struct scatterlist *sg;
	struct scatterlist *sgout;
	const char *e;
	void *assoc;
	char *iv;
	char *xbuf[XBUFSIZE];
	char *xoutbuf[XBUFSIZE];
	char *axbuf[XBUFSIZE];
	unsigned int *b_size;
	unsigned int iv_len;
	struct crypto_wait wait;

	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
	if (!iv)
		return;

	if (aad_size >= PAGE_SIZE) {
		pr_err("associate data length (%u) too big\n", aad_size);
		goto out_noxbuf;
	}

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	if (testmgr_alloc_buf(xbuf))
		goto out_noxbuf;
	if (testmgr_alloc_buf(axbuf))
		goto out_noaxbuf;
	if (testmgr_alloc_buf(xoutbuf))
		goto out_nooutbuf;

	sg = kmalloc(sizeof(*sg) * 9 * 2, GFP_KERNEL);
	if (!sg)
		goto out_nosg;
	sgout = &sg[9];

	tfm = crypto_alloc_aead(algo, 0, 0);

	if (IS_ERR(tfm)) {
		pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
		       PTR_ERR(tfm));
		goto out_notfm;
	}

	crypto_init_wait(&wait);
	printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
			get_driver_name(crypto_aead, tfm), e);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("alg: aead: Failed to allocate request for %s\n",
		       algo);
		goto out_noreq;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	i = 0;
	do {
		b_size = aead_sizes;
		do {
			assoc = axbuf[0];
			memset(assoc, 0xff, aad_size);

			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for tvmem (%lu)\n",
				       *keysize + *b_size,
					TVMEMSIZE * PAGE_SIZE);
				goto out;
			}

			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}
			ret = crypto_aead_setkey(tfm, key, *keysize);
			if (!ret)
				ret = crypto_aead_setauthsize(tfm, authsize);

			iv_len = crypto_aead_ivsize(tfm);
			if (iv_len)
				memset(iv, 0xff, iv_len);

			crypto_aead_clear_flags(tfm, ~0);
			printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
					i, *keysize * 8, *b_size);


			memset(tvmem[0], 0xff, PAGE_SIZE);

			if (ret) {
				pr_err("setkey() failed flags=%x\n",
						crypto_aead_get_flags(tfm));
				goto out;
			}

			sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize),
				     assoc, aad_size);

			sg_init_aead(sgout, xoutbuf,
				     *b_size + (enc ? authsize : 0), assoc,
				     aad_size);

			aead_request_set_ad(req, aad_size);

			if (!enc) {

				/*
				 * For decryption we need a proper auth so
				 * we do the encryption path once with buffers
				 * reversed (input <-> output) to calculate it
				 */
				aead_request_set_crypt(req, sgout, sg,
						       *b_size, iv);
				ret = do_one_aead_op(req,
						     crypto_aead_encrypt(req));

				if (ret) {
					pr_err("calculating auth failed failed (%d)\n",
					       ret);
					break;
				}
			}

			aead_request_set_crypt(req, sg, sgout,
					       *b_size + (enc ? 0 : authsize),
					       iv);

			if (secs) {
				ret = test_aead_jiffies(req, enc, *b_size,
							secs);
				cond_resched();
			} else {
				ret = test_aead_cycles(req, enc, *b_size);
			}

			if (ret) {
				pr_err("%s() failed return code=%d\n", e, ret);
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out:
	aead_request_free(req);
out_noreq:
	crypto_free_aead(tfm);
out_notfm:
	kfree(sg);
out_nosg:
	testmgr_free_buf(xoutbuf);
out_nooutbuf:
	testmgr_free_buf(axbuf);
out_noaxbuf:
	testmgr_free_buf(xbuf);
out_noxbuf:
	kfree(iv);
}

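/* Map the TVMEMSIZE test pages into a scatterlist and fill them. */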
static void test_hash_sg_init(struct scatterlist *sg)
{
	int i;

	sg_init_table(sg, TVMEMSIZE);
	for (i = 0; i < TVMEMSIZE; i++) {
		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
		memset(tvmem[i], 0xff, PAGE_SIZE);
	}
}

static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
	struct crypto_wait *wait = req->base.data;

	return crypto_wait_req(ret, wait);
}

struct test_mb_ahash_data {
	struct scatterlist sg[XBUFSIZE];
	char result[64];
	struct ahash_request *req;
	struct crypto_wait wait;
	char *xbuf[XBUFSIZE];
};

static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb,
				   int *rc)
{
	int i, err = 0;

	/* Fire up a bunch of concurrent requests */
	for (i = 0; i < num_mb; i++)
		rc[i] = crypto_ahash_digest(data[i].req);

	/* Wait for all requests to finish */
	for (i = 0; i < num_mb; i++) {
		rc[i] = crypto_wait_req(rc[i], &data[i].wait);

		if (rc[i]) {
			pr_info("concurrent request %d error %d\n", i, rc[i]);
			err = rc[i];
		}
	}

	return err;
}

static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
				 int secs, u32 num_mb)
{
	unsigned long start, end;
	int bcount;
	int ret = 0;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		ret = do_mult_ahash_op(data, num_mb, rc);
		if (ret)
			goto out;
	}

	pr_cont("%d operations in %d seconds (%ld bytes)\n",
		bcount * num_mb, secs, (long)bcount * blen * num_mb);

out:
	kfree(rc);
	return ret;
}

static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
				u32 num_mb)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_mult_ahash_op(data, num_mb, rc);
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		ret = do_mult_ahash_op(data, num_mb, rc);
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

	pr_cont("1 operation in %lu cycles (%d bytes)\n",
		(cycles + 4) / (8 * num_mb), blen);

out:
	kfree(rc);
	return ret;
}

static void test_mb_ahash_speed(const char *algo, unsigned int secs,
				struct hash_speed *speed, u32 num_mb)
{
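	/* Multibuffer hashing: num_mb concurrent digest requests. */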
	struct test_mb_ahash_data *data;
	struct crypto_ahash *tfm;
	unsigned int i, j, k;
	int ret;

	data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
			algo, PTR_ERR(tfm));
		goto free_data;
	}

	for (i = 0; i < num_mb; ++i) {
		if (testmgr_alloc_buf(data[i].xbuf))
			goto out;

		crypto_init_wait(&data[i].wait);

		data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!data[i].req) {
			pr_err("alg: hash: Failed to allocate request for %s\n",
			       algo);
			goto out;
		}

		ahash_request_set_callback(data[i].req, 0, crypto_req_done,
					   &data[i].wait);

		sg_init_table(data[i].sg, XBUFSIZE);
		for (j = 0; j < XBUFSIZE; j++) {
			sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
			memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
		}
	}

	pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
		get_driver_name(crypto_ahash, tfm));

	for (i = 0; speed[i].blen != 0; i++) {
		/* For some reason this only tests digests. */
		if (speed[i].blen != speed[i].plen)
			continue;

		if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
			pr_err("template (%u) too big for buffer (%lu)\n",
			       speed[i].blen, XBUFSIZE * PAGE_SIZE);
			goto out;
		}

		if (speed[i].klen)
			crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);

		for (k = 0; k < num_mb; k++)
			ahash_request_set_crypt(data[k].req, data[k].sg,
						data[k].result, speed[i].blen);

		pr_info("test%3u "
			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
			i, speed[i].blen, speed[i].plen,
			speed[i].blen / speed[i].plen);

		if (secs) {
			ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
						    num_mb);
			cond_resched();
		} else {
			ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
		}
		if (ret) {
			pr_err("At least one hashing failed ret=%d\n", ret);
			break;
		}
	}

out:
	for (k = 0; k < num_mb; ++k)
		ahash_request_free(data[k].req);

	for (k = 0; k < num_mb; ++k)
		testmgr_free_buf(data[k].xbuf);

	crypto_free_ahash(tfm);

free_data:
	kfree(data);
}

static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
				     char *out, int secs)
{
	unsigned long start, end;
	int bcount;
	int ret;

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
		if (ret)
			return ret;
	}

	printk("%6u opers/sec, %9lu bytes/sec\n",
	       bcount / secs, ((long)bcount * blen) / secs);

	return 0;
}

static int test_ahash_jiffies(struct ahash_request *req, int blen,
			      int plen, char *out, int secs)
{
	unsigned long start, end;
	int bcount, pcount;
	int ret;

	if (plen == blen)
		return test_ahash_jiffies_digest(req, blen, out, secs);

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		ret = do_one_ahash_op(req, crypto_ahash_init(req));
		if (ret)
			return ret;
		for (pcount = 0; pcount < blen; pcount += plen) {
			ret = do_one_ahash_op(req, crypto_ahash_update(req));
			if (ret)
				return ret;
		}
		/* we assume there is enough space in 'out' for the result */
		ret = do_one_ahash_op(req, crypto_ahash_final(req));
		if (ret)
			return ret;
	}

	pr_cont("%6u opers/sec, %9lu bytes/sec\n",
		bcount / secs, ((long)bcount * blen) / secs);

	return 0;
}

static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
				    char *out)
{
	unsigned long cycles = 0;
	int ret, i;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();

		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
		if (ret)
			goto out;

		end = get_cycles();

		cycles += end - start;
	}

out:
	if (ret)
		return ret;

	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
		cycles / 8, cycles / (8 * blen));

	return 0;
}

static int test_ahash_cycles(struct ahash_request *req, int blen,
			     int plen, char *out)
{
	unsigned long cycles = 0;
	int i, pcount, ret;

	if (plen == blen)
		return test_ahash_cycles_digest(req, blen, out);

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_one_ahash_op(req, crypto_ahash_init(req));
		if (ret)
			goto out;
		for (pcount = 0; pcount < blen; pcount += plen) {
			ret = do_one_ahash_op(req, crypto_ahash_update(req));
			if (ret)
				goto out;
		}
		ret = do_one_ahash_op(req, crypto_ahash_final(req));
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();

		ret = do_one_ahash_op(req, crypto_ahash_init(req));
		if (ret)
			goto out;
		for (pcount = 0; pcount < blen; pcount += plen) {
			ret = do_one_ahash_op(req, crypto_ahash_update(req));
			if (ret)
				goto out;
		}
		ret = do_one_ahash_op(req, crypto_ahash_final(req));
		if (ret)
			goto out;

		end = get_cycles();

		cycles += end - start;
	}

out:
	if (ret)
		return ret;

	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
		cycles / 8, cycles / (8 * blen));

	return 0;
}

static void test_ahash_speed_common(const char *algo, unsigned int secs,
				    struct hash_speed *speed, unsigned mask)
{
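	/*
	 * Common body for the hash speed tests; "mask" selects between
	 * the async and sync-only variants when allocating the tfm.
	 */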
	struct scatterlist sg[TVMEMSIZE];
	struct crypto_wait wait;
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	char *output;
	int i, ret;

	tfm = crypto_alloc_ahash(algo, 0, mask);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
		       algo, PTR_ERR(tfm));
		return;
	}

	printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo,
			get_driver_name(crypto_ahash, tfm));

	if (crypto_ahash_digestsize(tfm) > MAX_DIGEST_SIZE) {
		pr_err("digestsize(%u) > %d\n", crypto_ahash_digestsize(tfm),
		       MAX_DIGEST_SIZE);
		goto out;
	}

	test_hash_sg_init(sg);
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("ahash request allocation failure\n");
		goto out;
	}

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
	if (!output)
		goto out_nomem;

	for (i = 0; speed[i].blen != 0; i++) {
		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
			pr_err("template (%u) too big for tvmem (%lu)\n",
			       speed[i].blen, TVMEMSIZE * PAGE_SIZE);
			break;
		}

		if (speed[i].klen)
			crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);

		pr_info("test%3u "
			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
			i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);

		ahash_request_set_crypt(req, sg, output, speed[i].plen);

		if (secs) {
			ret = test_ahash_jiffies(req, speed[i].blen,
						 speed[i].plen, output, secs);
			cond_resched();
		} else {
			ret = test_ahash_cycles(req, speed[i].blen,
						speed[i].plen, output);
		}

		if (ret) {
			pr_err("hashing failed ret=%d\n", ret);
			break;
		}
	}

	kfree(output);

out_nomem:
	ahash_request_free(req);

out:
	crypto_free_ahash(tfm);
}

static void test_ahash_speed(const char *algo, unsigned int secs,
			     struct hash_speed *speed)
{
	return test_ahash_speed_common(algo, secs, speed, 0);
}

static void test_hash_speed(const char *algo, unsigned int secs,
			    struct hash_speed *speed)
{
	return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
}

struct test_mb_skcipher_data {
	struct scatterlist sg[XBUFSIZE];
	struct skcipher_request *req;
	struct crypto_wait wait;
	char *xbuf[XBUFSIZE];
};

static int do_mult_acipher_op(struct test_mb_skcipher_data *data, int enc,
				u32 num_mb, int *rc)
{
	int i, err = 0;

	/* Fire up a bunch of concurrent requests */
	for (i = 0; i < num_mb; i++) {
		if (enc == ENCRYPT)
			rc[i] = crypto_skcipher_encrypt(data[i].req);
		else
			rc[i] = crypto_skcipher_decrypt(data[i].req);
	}

	/* Wait for all requests to finish */
	for (i = 0; i < num_mb; i++) {
		rc[i] = crypto_wait_req(rc[i], &data[i].wait);

		if (rc[i]) {
			pr_info("concurrent request %d error %d\n", i, rc[i]);
			err = rc[i];
		}
	}

	return err;
}

static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc,
				int blen, int secs, u32 num_mb)
{
	unsigned long start, end;
	int bcount;
	int ret = 0;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		ret = do_mult_acipher_op(data, enc, num_mb, rc);
		if (ret)
			goto out;
	}

	pr_cont("%d operations in %d seconds (%ld bytes)\n",
		bcount * num_mb, secs, (long)bcount * blen * num_mb);

out:
	kfree(rc);
	return ret;
}

static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc,
			       int blen, u32 num_mb)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_mult_acipher_op(data, enc, num_mb, rc);
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		ret = do_mult_acipher_op(data, enc, num_mb, rc);
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

	pr_cont("1 operation in %lu cycles (%d bytes)\n",
		(cycles + 4) / (8 * num_mb), blen);

out:
	kfree(rc);
	return ret;
}

static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
				   struct cipher_speed_template *template,
				   unsigned int tcount, u8 *keysize, u32 num_mb)
{
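	/* Multibuffer skcipher counterpart of test_mb_aead_speed(). */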
	struct test_mb_skcipher_data *data;
	struct crypto_skcipher *tfm;
	unsigned int i, j, iv_len;
	const char *key;
	const char *e;
	u32 *b_size;
	char iv[128];
	int ret;

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	tfm = crypto_alloc_skcipher(algo, 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
			algo, PTR_ERR(tfm));
		goto out_free_data;
	}

	for (i = 0; i < num_mb; ++i)
		if (testmgr_alloc_buf(data[i].xbuf)) {
			while (i--)
				testmgr_free_buf(data[i].xbuf);
			goto out_free_tfm;
		}

	for (i = 0; i < num_mb; ++i) {
		data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!data[i].req) {
			pr_err("alg: skcipher: Failed to allocate request for %s\n",
			       algo);
			while (i--)
				skcipher_request_free(data[i].req);
			goto out_free_xbuf;
		}
	}

	for (i = 0; i < num_mb; ++i) {
		skcipher_request_set_callback(data[i].req,
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &data[i].wait);
		crypto_init_wait(&data[i].wait);
	}

	pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
		get_driver_name(crypto_skcipher, tfm), e);

	i = 0;
	do {
		b_size = block_sizes;
		do {
			if (*b_size > XBUFSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for buffer (%lu)\n",
				       *b_size, XBUFSIZE * PAGE_SIZE);
				goto out;
			}

			pr_info("test %u (%d bit key, %d byte blocks): ", i,
				*keysize * 8, *b_size);

			/* Set up tfm global state, i.e. the key */

			memset(tvmem[0], 0xff, PAGE_SIZE);
			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}

			crypto_skcipher_clear_flags(tfm, ~0);

			ret = crypto_skcipher_setkey(tfm, key, *keysize);
			if (ret) {
				pr_err("setkey() failed flags=%x\n",
				       crypto_skcipher_get_flags(tfm));
				goto out;
			}

			iv_len = crypto_skcipher_ivsize(tfm);
			if (iv_len)
				memset(&iv, 0xff, iv_len);

			/* Now setup per request stuff, i.e. buffers */

			for (j = 0; j < num_mb; ++j) {
				struct test_mb_skcipher_data *cur = &data[j];
				unsigned int k = *b_size;
				unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE);
				unsigned int p = 0;

				sg_init_table(cur->sg, pages);

				while (k > PAGE_SIZE) {
					sg_set_buf(cur->sg + p, cur->xbuf[p],
						   PAGE_SIZE);
					memset(cur->xbuf[p], 0xff, PAGE_SIZE);
					p++;
					k -= PAGE_SIZE;
				}

				sg_set_buf(cur->sg + p, cur->xbuf[p], k);
				memset(cur->xbuf[p], 0xff, k);

				skcipher_request_set_crypt(cur->req, cur->sg,
							   cur->sg, *b_size,
							   iv);
			}

			if (secs) {
				ret = test_mb_acipher_jiffies(data, enc,
							      *b_size, secs,
							      num_mb);
				cond_resched();
			} else {
				ret = test_mb_acipher_cycles(data, enc,
							     *b_size, num_mb);
			}

			if (ret) {
				pr_err("%s() failed flags=%x\n", e,
				       crypto_skcipher_get_flags(tfm));
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out:
	for (i = 0; i < num_mb; ++i)
		skcipher_request_free(data[i].req);
out_free_xbuf:
	for (i = 0; i < num_mb; ++i)
		testmgr_free_buf(data[i].xbuf);
out_free_tfm:
	crypto_free_skcipher(tfm);
out_free_data:
	kfree(data);
}

static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
	struct crypto_wait *wait = req->base.data;

	return crypto_wait_req(ret, wait);
}

static int test_acipher_jiffies(struct skcipher_request *req, int enc,
				int blen, int secs)
{
	unsigned long start, end;
	int bcount;
	int ret;

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		if (enc)
			ret = do_one_acipher_op(req,
						crypto_skcipher_encrypt(req));
		else
			ret = do_one_acipher_op(req,
1436
						crypto_skcipher_decrypt(req));
1437 1438 1439 1440 1441 1442

		if (ret)
			return ret;
	}

	pr_cont("%d operations in %d seconds (%ld bytes)\n",
		bcount, secs, (long)bcount * blen);
	return 0;
}

static int test_acipher_cycles(struct skcipher_request *req, int enc,
			       int blen)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		if (enc)
			ret = do_one_acipher_op(req,
						crypto_skcipher_encrypt(req));
		else
			ret = do_one_acipher_op(req,
						crypto_skcipher_decrypt(req));

		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		if (enc)
			ret = do_one_acipher_op(req,
						crypto_skcipher_encrypt(req));
		else
			ret = do_one_acipher_op(req,
						crypto_skcipher_decrypt(req));
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

out:
	if (ret == 0)
		pr_cont("1 operation in %lu cycles (%d bytes)\n",
			(cycles + 4) / 8, blen);

	return ret;
}

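/*
 * Single-request skcipher speed test; "async" selects whether async
 * implementations may be used.  test_acipher_speed() and
 * test_cipher_speed() below are thin wrappers around this helper.
 */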
static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
				struct cipher_speed_template *template,
				unsigned int tcount, u8 *keysize, bool async)
{
	int ret;
	unsigned int i, j, k, iv_len;
	struct crypto_wait wait;
	const char *key;
	char iv[128];
	struct skcipher_request *req;
	struct crypto_skcipher *tfm;
	const char *e;
	u32 *b_size;

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	crypto_init_wait(&wait);

	tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n", algo,
		       PTR_ERR(tfm));
		return;
	}

	pr_info("\ntesting speed of %s %s (%s) %s\n", async ? "async" : "sync",
		algo, get_driver_name(crypto_skcipher, tfm), e);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
		       algo);
		goto out;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	i = 0;
	do {
		b_size = block_sizes;

		do {
			struct scatterlist sg[TVMEMSIZE];

			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for "
				       "tvmem (%lu)\n", *keysize + *b_size,
				       TVMEMSIZE * PAGE_SIZE);
				goto out_free_req;
			}

			pr_info("test %u (%d bit key, %d byte blocks): ", i,
				*keysize * 8, *b_size);

			memset(tvmem[0], 0xff, PAGE_SIZE);

			/* set key, plain text and IV */
			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}

			crypto_skcipher_clear_flags(tfm, ~0);

			ret = crypto_skcipher_setkey(tfm, key, *keysize);
			if (ret) {
				pr_err("setkey() failed flags=%x\n",
					crypto_skcipher_get_flags(tfm));
				goto out_free_req;
			}

			k = *keysize + *b_size;
			sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE));

			if (k > PAGE_SIZE) {
				sg_set_buf(sg, tvmem[0] + *keysize,
				   PAGE_SIZE - *keysize);
				k -= PAGE_SIZE;
				j = 1;
				while (k > PAGE_SIZE) {
					sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
					memset(tvmem[j], 0xff, PAGE_SIZE);
					j++;
					k -= PAGE_SIZE;
				}
				sg_set_buf(sg + j, tvmem[j], k);
				memset(tvmem[j], 0xff, k);
			} else {
				sg_set_buf(sg, tvmem[0] + *keysize, *b_size);
			}

			iv_len = crypto_skcipher_ivsize(tfm);
			if (iv_len)
				memset(&iv, 0xff, iv_len);

			skcipher_request_set_crypt(req, sg, sg, *b_size, iv);

			if (secs) {
				ret = test_acipher_jiffies(req, enc,
							   *b_size, secs);
				cond_resched();
			} else {
				ret = test_acipher_cycles(req, enc,
							  *b_size);
			}

			if (ret) {
				pr_err("%s() failed flags=%x\n", e,
				       crypto_skcipher_get_flags(tfm));
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out_free_req:
	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
}

static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
			       struct cipher_speed_template *template,
			       unsigned int tcount, u8 *keysize)
{
	return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
				   true);
}

static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
			      struct cipher_speed_template *template,
			      unsigned int tcount, u8 *keysize)
{
	return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
				   false);
}

static void test_available(void)
{
	char **name = check;

	while (*name) {