/* Path: thirdparty/mbedtls/library/constant_time.c */
/**1* Constant-time functions2*3* Copyright The Mbed TLS Contributors4* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later5*/67/*8* The following functions are implemented without using comparison operators, as those9* might be translated to branches by some compilers on some platforms.10*/1112#include <stdint.h>13#include <limits.h>1415#include "common.h"16#include "constant_time_internal.h"17#include "mbedtls/constant_time.h"18#include "mbedtls/error.h"19#include "mbedtls/platform_util.h"2021#include <string.h>2223#if !defined(MBEDTLS_CT_ASM)24/*25* Define an object with the value zero, such that the compiler cannot prove that it26* has the value zero (because it is volatile, it "may be modified in ways unknown to27* the implementation").28*/29volatile mbedtls_ct_uint_t mbedtls_ct_zero = 0;30#endif3132/*33* Define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS where assembly is present to34* perform fast unaligned access to volatile data.35*36* This is needed because mbedtls_get_unaligned_uintXX etc don't support volatile37* memory accesses.38*39* Some of these definitions could be moved into alignment.h but for now they are40* only used here.41*/42#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) && \43((defined(MBEDTLS_CT_ARM_ASM) && (UINTPTR_MAX == 0xfffffffful)) || \44defined(MBEDTLS_CT_AARCH64_ASM))45/* We check pointer sizes to avoid issues with them not matching register size requirements */46#define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS4748static inline uint32_t mbedtls_get_unaligned_volatile_uint32(volatile const unsigned char *p)49{50/* This is UB, even where it's safe:51* return *((volatile uint32_t*)p);52* so instead the same thing is expressed in assembly below.53*/54uint32_t r;55#if defined(MBEDTLS_CT_ARM_ASM)56asm volatile ("ldr %0, [%1]" : "=r" (r) : "r" (p) :);57#elif defined(MBEDTLS_CT_AARCH64_ASM)58asm volatile ("ldr %w0, [%1]" : "=r" (r) : MBEDTLS_ASM_AARCH64_PTR_CONSTRAINT(p) :);59#else60#error "No assembly defined for 
mbedtls_get_unaligned_volatile_uint32"61#endif62return r;63}64#endif /* defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) &&65(defined(MBEDTLS_CT_ARM_ASM) || defined(MBEDTLS_CT_AARCH64_ASM)) */6667int mbedtls_ct_memcmp(const void *a,68const void *b,69size_t n)70{71size_t i = 0;72/*73* `A` and `B` are cast to volatile to ensure that the compiler74* generates code that always fully reads both buffers.75* Otherwise it could generate a test to exit early if `diff` has all76* bits set early in the loop.77*/78volatile const unsigned char *A = (volatile const unsigned char *) a;79volatile const unsigned char *B = (volatile const unsigned char *) b;80uint32_t diff = 0;8182#if defined(MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS)83for (; (i + 4) <= n; i += 4) {84uint32_t x = mbedtls_get_unaligned_volatile_uint32(A + i);85uint32_t y = mbedtls_get_unaligned_volatile_uint32(B + i);86diff |= x ^ y;87}88#endif8990for (; i < n; i++) {91/* Read volatile data in order before computing diff.92* This avoids IAR compiler warning:93* 'the order of volatile accesses is undefined ..' */94unsigned char x = A[i], y = B[i];95diff |= x ^ y;96}979899#if (INT_MAX < INT32_MAX)100/* We don't support int smaller than 32-bits, but if someone tried to build101* with this configuration, there is a risk that, for differing data, the102* only bits set in diff are in the top 16-bits, and would be lost by a103* simple cast from uint32 to int.104* This would have significant security implications, so protect against it. 
*/105#error "mbedtls_ct_memcmp() requires minimum 32-bit ints"106#else107/* The bit-twiddling ensures that when we cast uint32_t to int, we are casting108* a value that is in the range 0..INT_MAX - a value larger than this would109* result in implementation defined behaviour.110*111* This ensures that the value returned by the function is non-zero iff112* diff is non-zero.113*/114return (int) ((diff & 0xffff) | (diff >> 16));115#endif116}117118#if defined(MBEDTLS_NIST_KW_C)119120int mbedtls_ct_memcmp_partial(const void *a,121const void *b,122size_t n,123size_t skip_head,124size_t skip_tail)125{126unsigned int diff = 0;127128volatile const unsigned char *A = (volatile const unsigned char *) a;129volatile const unsigned char *B = (volatile const unsigned char *) b;130131size_t valid_end = n - skip_tail;132133for (size_t i = 0; i < n; i++) {134unsigned char x = A[i], y = B[i];135unsigned int d = x ^ y;136mbedtls_ct_condition_t valid = mbedtls_ct_bool_and(mbedtls_ct_uint_ge(i, skip_head),137mbedtls_ct_uint_lt(i, valid_end));138diff |= mbedtls_ct_uint_if_else_0(valid, d);139}140141/* Since we go byte-by-byte, the only bits set will be in the bottom 8 bits, so the142* cast from uint to int is safe. */143return (int) diff;144}145146#endif147148#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)149150void mbedtls_ct_memmove_left(void *start, size_t total, size_t offset)151{152volatile unsigned char *buf = start;153for (size_t i = 0; i < total; i++) {154mbedtls_ct_condition_t no_op = mbedtls_ct_uint_gt(total - offset, i);155/* The first `total - offset` passes are a no-op. The last156* `offset` passes shift the data one byte to the left and157* zero out the last byte. 
*/158for (size_t n = 0; n < total - 1; n++) {159unsigned char current = buf[n];160unsigned char next = buf[n+1];161buf[n] = mbedtls_ct_uint_if(no_op, current, next);162}163buf[total-1] = mbedtls_ct_uint_if_else_0(no_op, buf[total-1]);164}165}166167#endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && ! MBEDTLS_RSA_ALT */168169void mbedtls_ct_memcpy_if(mbedtls_ct_condition_t condition,170unsigned char *dest,171const unsigned char *src1,172const unsigned char *src2,173size_t len)174{175#if defined(MBEDTLS_CT_SIZE_64)176const uint64_t mask = (uint64_t) condition;177const uint64_t not_mask = (uint64_t) ~mbedtls_ct_compiler_opaque(condition);178#else179const uint32_t mask = (uint32_t) condition;180const uint32_t not_mask = (uint32_t) ~mbedtls_ct_compiler_opaque(condition);181#endif182183/* If src2 is NULL, setup src2 so that we read from the destination address.184*185* This means that if src2 == NULL && condition is false, the result will be a186* no-op because we read from dest and write the same data back into dest.187*/188if (src2 == NULL) {189src2 = dest;190}191192/* dest[i] = c1 == c2 ? 
src[i] : dest[i] */193size_t i = 0;194#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)195#if defined(MBEDTLS_CT_SIZE_64)196for (; (i + 8) <= len; i += 8) {197uint64_t a = mbedtls_get_unaligned_uint64(src1 + i) & mask;198uint64_t b = mbedtls_get_unaligned_uint64(src2 + i) & not_mask;199mbedtls_put_unaligned_uint64(dest + i, a | b);200}201#else202for (; (i + 4) <= len; i += 4) {203uint32_t a = mbedtls_get_unaligned_uint32(src1 + i) & mask;204uint32_t b = mbedtls_get_unaligned_uint32(src2 + i) & not_mask;205mbedtls_put_unaligned_uint32(dest + i, a | b);206}207#endif /* defined(MBEDTLS_CT_SIZE_64) */208#endif /* MBEDTLS_EFFICIENT_UNALIGNED_ACCESS */209for (; i < len; i++) {210dest[i] = (src1[i] & mask) | (src2[i] & not_mask);211}212}213214void mbedtls_ct_memcpy_offset(unsigned char *dest,215const unsigned char *src,216size_t offset,217size_t offset_min,218size_t offset_max,219size_t len)220{221size_t offsetval;222223for (offsetval = offset_min; offsetval <= offset_max; offsetval++) {224mbedtls_ct_memcpy_if(mbedtls_ct_uint_eq(offsetval, offset), dest, src + offsetval, NULL,225len);226}227}228229#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)230231void mbedtls_ct_zeroize_if(mbedtls_ct_condition_t condition, void *buf, size_t len)232{233uint32_t mask = (uint32_t) ~condition;234uint8_t *p = (uint8_t *) buf;235size_t i = 0;236#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)237for (; (i + 4) <= len; i += 4) {238mbedtls_put_unaligned_uint32((void *) (p + i),239mbedtls_get_unaligned_uint32((void *) (p + i)) & mask);240}241#endif242for (; i < len; i++) {243p[i] = p[i] & mask;244}245}246247#endif /* defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) */248249250