Path: lib/crypto/tests/hash-test-template.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Test cases for hash functions, including a benchmark. This is included by
 * KUnit test suites that want to use it. See sha512_kunit.c for an example.
 *
 * Copyright 2025 Google LLC
 */
#include <kunit/test.h>
#include <linux/hrtimer.h>
#include <linux/timekeeping.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

/* test_buf is a guarded buffer, i.e. &test_buf[TEST_BUF_LEN] is not mapped. */
#define TEST_BUF_LEN 16384
static u8 *test_buf;

static u8 *orig_test_buf;

static u64 random_seed;

/*
 * This is a simple linear congruential generator. It is used only for testing,
 * which does not require cryptographically secure random numbers. A hard-coded
 * algorithm is used instead of <linux/prandom.h> so that it matches the
 * algorithm used by the test vector generation script. This allows the input
 * data in random test vectors to be concisely stored as just the seed.
 */
static u32 rand32(void)
{
	random_seed = (random_seed * 25214903917 + 11) & ((1ULL << 48) - 1);
	return random_seed >> 16;
}

static void rand_bytes(u8 *out, size_t len)
{
	for (size_t i = 0; i < len; i++)
		out[i] = rand32();
}

static void rand_bytes_seeded_from_len(u8 *out, size_t len)
{
	random_seed = len;
	rand_bytes(out, len);
}
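/*
 * For illustration, here is a minimal userspace sketch of the generator side
 * (hypothetical; the actual test vector generation script is not part of this
 * file). It reproduces the exact byte stream of rand_bytes_seeded_from_len()
 * above, which is why a vector's input data can be regenerated from its
 * length alone:
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static uint64_t gen_seed;
 *
 *	static uint32_t gen_rand32(void)
 *	{
 *		gen_seed = (gen_seed * 25214903917 + 11) & ((1ULL << 48) - 1);
 *		return gen_seed >> 16;
 *	}
 *
 *	static void gen_data_for_len(uint8_t *out, size_t len)
 *	{
 *		gen_seed = len;
 *		for (size_t i = 0; i < len; i++)
 *			out[i] = (uint8_t)gen_rand32();
 *	}
 */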
static bool rand_bool(void)
{
	return rand32() % 2;
}

/* Generate a random length, preferring small lengths. */
static size_t rand_length(size_t max_len)
{
	size_t len;

	switch (rand32() % 3) {
	case 0:
		len = rand32() % 128;
		break;
	case 1:
		len = rand32() % 3072;
		break;
	default:
		len = rand32();
		break;
	}
	return len % (max_len + 1);
}

static size_t rand_offset(size_t max_offset)
{
	return min(rand32() % 128, max_offset);
}

static int hash_suite_init(struct kunit_suite *suite)
{
	/*
	 * Allocate the test buffer using vmalloc() with a page-aligned length
	 * so that it is immediately followed by a guard page. This allows
	 * buffer overreads to be detected, even in assembly code.
	 */
	size_t alloc_len = round_up(TEST_BUF_LEN, PAGE_SIZE);

	orig_test_buf = vmalloc(alloc_len);
	if (!orig_test_buf)
		return -ENOMEM;

	test_buf = orig_test_buf + alloc_len - TEST_BUF_LEN;
	return 0;
}

static void hash_suite_exit(struct kunit_suite *suite)
{
	vfree(orig_test_buf);
	orig_test_buf = NULL;
	test_buf = NULL;
}
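/*
 * A worked example of the layout set up above, assuming 64 KiB pages (the
 * case where the rounding matters): alloc_len = round_up(16384, 65536) =
 * 65536, so test_buf = orig_test_buf + 65536 - 16384 and the test buffer
 * occupies the last 16384 bytes of the vmalloc'ed area. Since vmalloc areas
 * are followed by an unmapped guard hole, any access at or past
 * &test_buf[TEST_BUF_LEN] faults immediately, even from assembly code. With
 * 4 KiB pages, alloc_len == TEST_BUF_LEN and test_buf == orig_test_buf.
 */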
/*
 * Test the hash function against a list of test vectors.
 *
 * Note that it's only necessary to run each test vector in one way (e.g.,
 * one-shot instead of incremental), since consistency between different ways
 * of using the APIs is verified by other test cases.
 */
static void test_hash_test_vectors(struct kunit *test)
{
	for (size_t i = 0; i < ARRAY_SIZE(hash_testvecs); i++) {
		size_t data_len = hash_testvecs[i].data_len;
		u8 actual_hash[HASH_SIZE];

		KUNIT_ASSERT_LE(test, data_len, TEST_BUF_LEN);
		rand_bytes_seeded_from_len(test_buf, data_len);

		HASH(test_buf, data_len, actual_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, actual_hash, hash_testvecs[i].digest, HASH_SIZE,
			"Wrong result with test vector %zu; data_len=%zu", i,
			data_len);
	}
}
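/*
 * For reference, hash_testvecs and hash_testvec_consolidated are expected to
 * come from a generated header included before this template. Their shape is
 * not defined here; the following sketch is merely inferred from how they are
 * used in this file:
 *
 *	static const struct {
 *		size_t data_len;
 *		u8 digest[HASH_SIZE];
 *	} hash_testvecs[] = {
 *		{ .data_len = 0, .digest = { ... } },
 *		...
 *	};
 *
 *	static const u8 hash_testvec_consolidated[HASH_SIZE] = { ... };
 */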
/*
 * Test that the hash function produces correct results for *every* length up
 * to 4096 bytes. To do this, generate seeded random data, then calculate a
 * hash value for each length 0..4096, then hash the hash values. Verify just
 * the final hash value, which should match only when all hash values were
 * correct.
 */
static void test_hash_all_lens_up_to_4096(struct kunit *test)
{
	struct HASH_CTX ctx;
	u8 hash[HASH_SIZE];

	static_assert(TEST_BUF_LEN >= 4096);
	rand_bytes_seeded_from_len(test_buf, 4096);
	HASH_INIT(&ctx);
	for (size_t len = 0; len <= 4096; len++) {
		HASH(test_buf, len, hash);
		HASH_UPDATE(&ctx, hash, HASH_SIZE);
	}
	HASH_FINAL(&ctx, hash);
	KUNIT_ASSERT_MEMEQ(test, hash, hash_testvec_consolidated, HASH_SIZE);
}

/*
 * Test that the hash function produces the same result with a one-shot
 * computation as it does with an incremental computation.
 */
static void test_hash_incremental_updates(struct kunit *test)
{
	for (int i = 0; i < 1000; i++) {
		size_t total_len, offset;
		struct HASH_CTX ctx;
		u8 hash1[HASH_SIZE];
		u8 hash2[HASH_SIZE];
		size_t num_parts = 0;
		size_t remaining_len, cur_offset;

		total_len = rand_length(TEST_BUF_LEN);
		offset = rand_offset(TEST_BUF_LEN - total_len);
		rand_bytes(&test_buf[offset], total_len);

		/* Compute the hash value in one shot. */
		HASH(&test_buf[offset], total_len, hash1);

		/*
		 * Compute the hash value incrementally, using a randomly
		 * selected sequence of update lengths that sum to total_len.
		 */
		HASH_INIT(&ctx);
		remaining_len = total_len;
		cur_offset = offset;
		while (rand_bool()) {
			size_t part_len = rand_length(remaining_len);

			HASH_UPDATE(&ctx, &test_buf[cur_offset], part_len);
			num_parts++;
			cur_offset += part_len;
			remaining_len -= part_len;
		}
		if (remaining_len != 0 || rand_bool()) {
			HASH_UPDATE(&ctx, &test_buf[cur_offset], remaining_len);
			num_parts++;
		}
		HASH_FINAL(&ctx, hash2);

		/* Verify that the two hash values are the same. */
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash1, hash2, HASH_SIZE,
			"Incremental test failed with total_len=%zu num_parts=%zu offset=%zu",
			total_len, num_parts, offset);
	}
}

/*
 * Test that the hash function does not overrun any buffers. Uses a guard page
 * to catch buffer overruns even if they occur in assembly code.
 */
static void test_hash_buffer_overruns(struct kunit *test)
{
	const size_t max_tested_len = TEST_BUF_LEN - sizeof(struct HASH_CTX);
	void *const buf_end = &test_buf[TEST_BUF_LEN];
	struct HASH_CTX *guarded_ctx = buf_end - sizeof(*guarded_ctx);

	rand_bytes(test_buf, TEST_BUF_LEN);

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(max_tested_len);
		struct HASH_CTX ctx;
		u8 hash[HASH_SIZE];

		/* Check for overruns of the data buffer. */
		HASH(buf_end - len, len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, buf_end - len, len);
		HASH_FINAL(&ctx, hash);

		/* Check for overruns of the hash value buffer. */
		HASH(test_buf, len, buf_end - HASH_SIZE);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, test_buf, len);
		HASH_FINAL(&ctx, buf_end - HASH_SIZE);

		/* Check for overruns of the hash context. */
		HASH_INIT(guarded_ctx);
		HASH_UPDATE(guarded_ctx, test_buf, len);
		HASH_FINAL(guarded_ctx, hash);
	}
}

/*
 * Test that the caller is permitted to alias the output digest and source data
 * buffer, and also modify the source data buffer after it has been used.
 */
static void test_hash_overlaps(struct kunit *test)
{
	const size_t max_tested_len = TEST_BUF_LEN - HASH_SIZE;
	struct HASH_CTX ctx;
	u8 hash[HASH_SIZE];

	rand_bytes(test_buf, TEST_BUF_LEN);

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(max_tested_len);
		size_t offset = HASH_SIZE + rand_offset(max_tested_len - len);
		bool left_end = rand_bool();
		u8 *ovl_hash = left_end ? &test_buf[offset] :
					  &test_buf[offset + len - HASH_SIZE];

		HASH(&test_buf[offset], len, hash);
		HASH(&test_buf[offset], len, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 1 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);

		/* Repeat the above test, but this time use init+update+final. */
		HASH(&test_buf[offset], len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, &test_buf[offset], len);
		HASH_FINAL(&ctx, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 2 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);

		/* Test modifying the source data after it was used. */
		HASH(&test_buf[offset], len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, &test_buf[offset], len);
		rand_bytes(&test_buf[offset], len);
		HASH_FINAL(&ctx, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 3 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);
	}
}

/*
 * Test that if the same data is hashed at different alignments in memory, the
 * results are the same.
 */
static void test_hash_alignment_consistency(struct kunit *test)
{
	u8 hash1[128 + HASH_SIZE];
	u8 hash2[128 + HASH_SIZE];

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(TEST_BUF_LEN);
		size_t data_offs1 = rand_offset(TEST_BUF_LEN - len);
		size_t data_offs2 = rand_offset(TEST_BUF_LEN - len);
		size_t hash_offs1 = rand_offset(128);
		size_t hash_offs2 = rand_offset(128);

		rand_bytes(&test_buf[data_offs1], len);
		HASH(&test_buf[data_offs1], len, &hash1[hash_offs1]);
		memmove(&test_buf[data_offs2], &test_buf[data_offs1], len);
		HASH(&test_buf[data_offs2], len, &hash2[hash_offs2]);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, &hash1[hash_offs1], &hash2[hash_offs2], HASH_SIZE,
			"Alignment consistency test failed with len=%zu data_offs=(%zu,%zu) hash_offs=(%zu,%zu)",
			len, data_offs1, data_offs2, hash_offs1, hash_offs2);
	}
}

/* Test that HASH_FINAL zeroizes the context. */
static void test_hash_ctx_zeroization(struct kunit *test)
{
	static const u8 zeroes[sizeof(struct HASH_CTX)];
	struct HASH_CTX ctx;

	rand_bytes(test_buf, 128);
	HASH_INIT(&ctx);
	HASH_UPDATE(&ctx, test_buf, 128);
	HASH_FINAL(&ctx, test_buf);
	KUNIT_ASSERT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
			       "Hash context was not zeroized by finalization");
}
#define IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)

struct hash_irq_test_state {
	bool (*func)(void *test_specific_state);
	void *test_specific_state;
	bool task_func_reported_failure;
	bool hardirq_func_reported_failure;
	bool softirq_func_reported_failure;
	unsigned long hardirq_func_calls;
	unsigned long softirq_func_calls;
	struct hrtimer timer;
	struct work_struct bh_work;
};

static enum hrtimer_restart hash_irq_test_timer_func(struct hrtimer *timer)
{
	struct hash_irq_test_state *state =
		container_of(timer, typeof(*state), timer);

	WARN_ON_ONCE(!in_hardirq());
	state->hardirq_func_calls++;

	if (!state->func(state->test_specific_state))
		state->hardirq_func_reported_failure = true;

	hrtimer_forward_now(&state->timer, IRQ_TEST_HRTIMER_INTERVAL);
	queue_work(system_bh_wq, &state->bh_work);
	return HRTIMER_RESTART;
}

static void hash_irq_test_bh_work_func(struct work_struct *work)
{
	struct hash_irq_test_state *state =
		container_of(work, typeof(*state), bh_work);

	WARN_ON_ONCE(!in_serving_softirq());
	state->softirq_func_calls++;

	if (!state->func(state->test_specific_state))
		state->softirq_func_reported_failure = true;
}

/*
 * Helper function which repeatedly runs the given @func in task, softirq, and
 * hardirq context concurrently, and reports a failure to KUnit if any
 * invocation of @func in any context returns false. @func is passed
 * @test_specific_state as its argument. At most 3 invocations of @func will
 * run concurrently: one in each of task, softirq, and hardirq context.
 *
 * The main purpose of this interrupt context testing is to validate fallback
 * code paths that run in contexts where the normal code path cannot be used,
 * typically due to the FPU or vector registers already being in use in kernel
 * mode. These code paths aren't covered when the test code is executed only
 * by the KUnit test runner thread in task context. The concurrency is needed
 * because merely using hardirq context is not sufficient to reach a fallback
 * code path on some architectures; the hardirq actually has to occur while
 * the FPU or vector unit is already in use in kernel mode.
 *
 * Another purpose of this testing is to detect issues with the architecture's
 * irq_fpu_usable() and kernel_fpu_begin/end() or equivalent functions,
 * especially in softirq context when the softirq may have interrupted a task
 * already using kernel-mode FPU or vector (if the arch didn't prevent that).
 * Crypto functions are often executed in softirqs, so this is important.
 */
static void run_irq_test(struct kunit *test, bool (*func)(void *),
			 int max_iterations, void *test_specific_state)
{
	struct hash_irq_test_state state = {
		.func = func,
		.test_specific_state = test_specific_state,
	};
	unsigned long end_jiffies;

	/*
	 * Set up a hrtimer (the way we access hardirq context) and a work
	 * struct for the BH workqueue (the way we access softirq context).
	 */
	hrtimer_setup_on_stack(&state.timer, hash_irq_test_timer_func,
			       CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	INIT_WORK_ONSTACK(&state.bh_work, hash_irq_test_bh_work_func);

	/* Run for up to max_iterations or 1 second, whichever comes first. */
	end_jiffies = jiffies + HZ;
	hrtimer_start(&state.timer, IRQ_TEST_HRTIMER_INTERVAL,
		      HRTIMER_MODE_REL_HARD);
	for (int i = 0; i < max_iterations && !time_after(jiffies, end_jiffies);
	     i++) {
		if (!func(test_specific_state))
			state.task_func_reported_failure = true;
	}

	/* Cancel the timer and work. */
	hrtimer_cancel(&state.timer);
	flush_work(&state.bh_work);

	/* Sanity check: the timer and BH functions should have been run. */
	KUNIT_EXPECT_GT_MSG(test, state.hardirq_func_calls, 0,
			    "Timer function was not called");
	KUNIT_EXPECT_GT_MSG(test, state.softirq_func_calls, 0,
			    "BH work function was not called");

	/* Check for incorrect hash values reported from any context. */
	KUNIT_EXPECT_FALSE_MSG(
		test, state.task_func_reported_failure,
		"Incorrect hash values reported from task context");
	KUNIT_EXPECT_FALSE_MSG(
		test, state.hardirq_func_reported_failure,
		"Incorrect hash values reported from hardirq context");
	KUNIT_EXPECT_FALSE_MSG(
		test, state.softirq_func_reported_failure,
		"Incorrect hash values reported from softirq context");
}
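/*
 * For context, the fallback pattern that this testing exercises typically
 * looks something like the following in a hash implementation's glue code (a
 * hypothetical sketch; the *_blocks_simd() and *_blocks_generic() names are
 * illustrative, not from this file):
 *
 *	if (static_branch_likely(&have_simd) && irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		sha512_blocks_simd(state, data, nblocks);
 *		kernel_fpu_end();
 *	} else {
 *		sha512_blocks_generic(state, data, nblocks);
 *	}
 *
 * A hardirq arriving between kernel_fpu_begin() and kernel_fpu_end() must
 * take the generic branch, since irq_fpu_usable() returns false there. That
 * is exactly the situation run_irq_test() tries to create.
 */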
#define IRQ_TEST_DATA_LEN 256
#define IRQ_TEST_NUM_BUFFERS 3 /* matches max concurrency level */

struct hash_irq_test1_state {
	u8 expected_hashes[IRQ_TEST_NUM_BUFFERS][HASH_SIZE];
	atomic_t seqno;
};

/*
 * Compute the hash of one of the test messages and verify that it matches the
 * expected hash from @state->expected_hashes. To increase the chance of
 * detecting problems, cycle through multiple messages.
 */
static bool hash_irq_test1_func(void *state_)
{
	struct hash_irq_test1_state *state = state_;
	u32 i = (u32)atomic_inc_return(&state->seqno) % IRQ_TEST_NUM_BUFFERS;
	u8 actual_hash[HASH_SIZE];

	HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN, actual_hash);
	return memcmp(actual_hash, state->expected_hashes[i], HASH_SIZE) == 0;
}

/*
 * Test that if hashes are computed in task, softirq, and hardirq context
 * concurrently, then all results are as expected.
 */
static void test_hash_interrupt_context_1(struct kunit *test)
{
	struct hash_irq_test1_state state = {};

	/* Prepare some test messages and compute the expected hash of each. */
	rand_bytes(test_buf, IRQ_TEST_NUM_BUFFERS * IRQ_TEST_DATA_LEN);
	for (int i = 0; i < IRQ_TEST_NUM_BUFFERS; i++)
		HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN,
		     state.expected_hashes[i]);

	run_irq_test(test, hash_irq_test1_func, 100000, &state);
}

struct hash_irq_test2_hash_ctx {
	struct HASH_CTX hash_ctx;
	atomic_t in_use;
	int offset;
	int step;
};

struct hash_irq_test2_state {
	struct hash_irq_test2_hash_ctx ctxs[IRQ_TEST_NUM_BUFFERS];
	u8 expected_hash[HASH_SIZE];
	u16 update_lens[32];
	int num_steps;
};

static bool hash_irq_test2_func(void *state_)
{
	struct hash_irq_test2_state *state = state_;
	struct hash_irq_test2_hash_ctx *ctx;
	bool ret = true;

	for (ctx = &state->ctxs[0]; ctx < &state->ctxs[ARRAY_SIZE(state->ctxs)];
	     ctx++) {
		if (atomic_cmpxchg(&ctx->in_use, 0, 1) == 0)
			break;
	}
	if (WARN_ON_ONCE(ctx == &state->ctxs[ARRAY_SIZE(state->ctxs)])) {
		/*
		 * This should never happen, as the number of contexts is equal
		 * to the maximum concurrency level of run_irq_test().
		 */
		return false;
	}

	if (ctx->step == 0) {
		/* Init step */
		HASH_INIT(&ctx->hash_ctx);
		ctx->offset = 0;
		ctx->step++;
	} else if (ctx->step < state->num_steps - 1) {
		/* Update step */
		HASH_UPDATE(&ctx->hash_ctx, &test_buf[ctx->offset],
			    state->update_lens[ctx->step - 1]);
		ctx->offset += state->update_lens[ctx->step - 1];
		ctx->step++;
	} else {
		/* Final step */
		u8 actual_hash[HASH_SIZE];

		if (WARN_ON_ONCE(ctx->offset != TEST_BUF_LEN))
			ret = false;
		HASH_FINAL(&ctx->hash_ctx, actual_hash);
		if (memcmp(actual_hash, state->expected_hash, HASH_SIZE) != 0)
			ret = false;
		ctx->step = 0;
	}
	atomic_set_release(&ctx->in_use, 0);
	return ret;
}

/*
 * Test that if hashes are computed in task, softirq, and hardirq context
 * concurrently, *including doing different parts of the same incremental
 * computation in different contexts*, then all results are as expected.
 * Besides detecting bugs similar to those that test_hash_interrupt_context_1
 * can detect, this test case can also detect bugs where hash function
 * implementations don't correctly handle these mixed incremental computations.
 */
static void test_hash_interrupt_context_2(struct kunit *test)
{
	struct hash_irq_test2_state *state;
	int remaining = TEST_BUF_LEN;

	state = kunit_kzalloc(test, sizeof(*state), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, state);

	rand_bytes(test_buf, TEST_BUF_LEN);
	HASH(test_buf, TEST_BUF_LEN, state->expected_hash);

	/*
	 * Generate a list of update lengths to use. Ensure that it contains
	 * multiple entries but is limited to a maximum length.
	 */
	static_assert(TEST_BUF_LEN / 4096 > 1);
	for (state->num_steps = 0;
	     state->num_steps < ARRAY_SIZE(state->update_lens) - 1 && remaining;
	     state->num_steps++) {
		state->update_lens[state->num_steps] =
			rand_length(min(remaining, 4096));
		remaining -= state->update_lens[state->num_steps];
	}
	if (remaining)
		state->update_lens[state->num_steps++] = remaining;
	state->num_steps += 2; /* for init and final */

	run_irq_test(test, hash_irq_test2_func, 250000, state);
}
#define UNKEYED_HASH_KUNIT_CASES				\
	KUNIT_CASE(test_hash_test_vectors),			\
	KUNIT_CASE(test_hash_all_lens_up_to_4096),		\
	KUNIT_CASE(test_hash_incremental_updates),		\
	KUNIT_CASE(test_hash_buffer_overruns),			\
	KUNIT_CASE(test_hash_overlaps),				\
	KUNIT_CASE(test_hash_alignment_consistency),		\
	KUNIT_CASE(test_hash_ctx_zeroization),			\
	KUNIT_CASE(test_hash_interrupt_context_1),		\
	KUNIT_CASE(test_hash_interrupt_context_2)
/* benchmark_hash is omitted so that the suites can put it last. */

#ifdef HMAC
/*
 * Test the corresponding HMAC variant.
 *
 * This test case is fairly short, since HMAC is just a simple C wrapper around
 * the underlying unkeyed hash function, which is already well-tested by the
 * other test cases. It's not useful to test things like data alignment or
 * interrupt context again for HMAC, nor to have a long list of test vectors.
 *
 * Thus, just do a single consolidated test, which covers all data lengths up
 * to 4096 bytes and all key lengths up to 292 bytes. For each data length,
 * select a key length, generate the inputs from a seed, and compute the HMAC
 * value. Concatenate all these HMAC values together, and compute the HMAC of
 * that. Verify that value. If this fails, then the HMAC implementation is
 * wrong. This won't show which specific input failed, but that should be
 * fine. Any failure would likely be non-input-specific or also show in the
 * unkeyed tests.
 */
static void test_hmac(struct kunit *test)
{
	static const u8 zeroes[sizeof(struct HMAC_CTX)];
	u8 *raw_key;
	struct HMAC_KEY key;
	struct HMAC_CTX ctx;
	u8 mac[HASH_SIZE];
	u8 mac2[HASH_SIZE];

	static_assert(TEST_BUF_LEN >= 4096 + 293);
	rand_bytes_seeded_from_len(test_buf, 4096);
	raw_key = &test_buf[4096];

	rand_bytes_seeded_from_len(raw_key, 32);
	HMAC_PREPAREKEY(&key, raw_key, 32);
	HMAC_INIT(&ctx, &key);
	for (size_t data_len = 0; data_len <= 4096; data_len++) {
		/*
		 * Cycle through key lengths as well. Somewhat arbitrarily go
		 * up to 293, which is somewhat larger than the largest hash
		 * block size (which is the size at which the key starts being
		 * hashed down to one block); going higher would not be useful.
		 * To reduce correlation with data_len, use a prime number here.
		 */
		size_t key_len = data_len % 293;

		HMAC_UPDATE(&ctx, test_buf, data_len);

		rand_bytes_seeded_from_len(raw_key, key_len);
		HMAC_USINGRAWKEY(raw_key, key_len, test_buf, data_len, mac);
		HMAC_UPDATE(&ctx, mac, HASH_SIZE);

		/* Verify that HMAC() is consistent with HMAC_USINGRAWKEY(). */
		HMAC_PREPAREKEY(&key, raw_key, key_len);
		HMAC(&key, test_buf, data_len, mac2);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, mac, mac2, HASH_SIZE,
			"HMAC gave different results with raw and prepared keys");
	}
	HMAC_FINAL(&ctx, mac);
	KUNIT_EXPECT_MEMEQ_MSG(test, mac, hmac_testvec_consolidated, HASH_SIZE,
			       "HMAC gave wrong result");
	KUNIT_EXPECT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
			       "HMAC context was not zeroized by finalization");
}
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES, KUNIT_CASE(test_hmac)
#else
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES
#endif
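/*
 * A minimal sketch of how a suite is expected to consume the above (the array
 * and suite names below are illustrative; see sha512_kunit.c for the real
 * example). benchmark_hash goes last, as noted above:
 *
 *	static struct kunit_case hash_test_cases[] = {
 *		HASH_KUNIT_CASES,
 *		KUNIT_CASE(benchmark_hash),
 *		{},
 *	};
 *
 *	static struct kunit_suite hash_test_suite = {
 *		.name = "sha512",
 *		.test_cases = hash_test_cases,
 *		.suite_init = hash_suite_init,
 *		.suite_exit = hash_suite_exit,
 *	};
 *	kunit_test_suite(hash_test_suite);
 */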
/* Benchmark the hash function on various data lengths. */
static void benchmark_hash(struct kunit *test)
{
	static const size_t lens_to_test[] = {
		1,   16,  64,   127,  128,  200,   256,
		511, 512, 1024, 3173, 4096, 16384,
	};
	u8 hash[HASH_SIZE];

	if (!IS_ENABLED(CONFIG_CRYPTO_LIB_BENCHMARK))
		kunit_skip(test, "not enabled");

	/* Warm-up */
	for (size_t i = 0; i < 10000000; i += TEST_BUF_LEN)
		HASH(test_buf, TEST_BUF_LEN, hash);

	for (size_t i = 0; i < ARRAY_SIZE(lens_to_test); i++) {
		size_t len = lens_to_test[i];
		/* The '+ 128' tries to account for per-message overhead. */
		size_t num_iters = 10000000 / (len + 128);
		u64 t;

		KUNIT_ASSERT_LE(test, len, TEST_BUF_LEN);
		preempt_disable();
		t = ktime_get_ns();
		for (size_t j = 0; j < num_iters; j++)
			HASH(test_buf, len, hash);
		t = ktime_get_ns() - t;
		preempt_enable();
		kunit_info(test, "len=%zu: %llu MB/s", len,
			   div64_u64((u64)len * num_iters * 1000, t ?: 1));
	}
}
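/*
 * A note on the reported units: each timed loop processes len * num_iters
 * bytes in t nanoseconds, and bytes * 1000 / t_ns equals bytes per
 * microsecond, i.e. 10^6 bytes per second (decimal MB/s). For example, with
 * len = 4096: num_iters = 10000000 / 4224 = 2367, and if the loop takes
 * t = 2000000 ns, the reported rate is 4096 * 2367 * 1000 / 2000000 =
 * 4847 MB/s (integer division).
 */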