/*-
 * Copyright (c) 2014 The FreeBSD Foundation
 *
 * This software was developed by John-Mark Gurney under
 * the sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef _GFMULT_H_
#define	_GFMULT_H_

#ifdef __APPLE__
#define	__aligned(x)	__attribute__((__aligned__(x)))
#define	be64dec(buf)	__builtin_bswap64(*(uint64_t *)buf)
#define	be64enc(buf, x)	(*(uint64_t *)buf = __builtin_bswap64(x))
#else
#include <sys/endian.h>
#endif

#ifdef _KERNEL
#include <sys/types.h>
#else
#include <stdint.h>
#include <strings.h>
#endif

#define	REQ_ALIGN	(16 * 4)
/*
 * The rows are striped across cache lines.  Note that the indexes
 * are bit reversed to make accesses quicker.
 */
struct gf128table {
	uint32_t a[16] __aligned(REQ_ALIGN);	/* bits   0 - 31 */
	uint32_t b[16] __aligned(REQ_ALIGN);	/* bits  63 - 32 */
	uint32_t c[16] __aligned(REQ_ALIGN);	/* bits  95 - 64 */
	uint32_t d[16] __aligned(REQ_ALIGN);	/* bits 127 - 96 */
} __aligned(REQ_ALIGN);

/*
 * A set of tables that contain h, h^2, h^3, h^4.  To be used w/ gf128_mul4.
 */
struct gf128table4 {
	struct gf128table	tbls[4];
};

/*
 * GCM per spec is bit reversed in memory.  So byte 0 is really bit reversed
 * and contains bits 0-7.  We can deal w/ this by using right shifts and
 * related math instead of having to bit reverse everything.  This means that
 * the low bits are in v[0] (bits 0-63) and reverse order, while the high
 * bits are in v[1] (bits 64-127) and reverse order.  The high bit of v[0] is
 * bit 0, and the low bit of v[1] is bit 127.
 */
struct gf128 {
	uint64_t v[2];
};
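
/*
 * Illustrative sketch, not part of the original interface: the helper below
 * (the name gf128_example_bit is hypothetical) only exists to make the bit
 * numbering described above concrete for n < 128.  GCM bit n, i.e. the
 * coefficient of x^n, is stored counting down from the most significant bit
 * of v[0] for n < 64 and of v[1] for n >= 64.
 */
static inline struct gf128
gf128_example_bit(unsigned int n)
{
	struct gf128 r = { .v = { 0, 0 } };

	if (n < 64)
		r.v[0] = 1ULL << (63 - n);	/* bit 0 is the MSB of v[0] */
	else
		r.v[1] = 1ULL << (127 - n);	/* bit 127 is the LSB of v[1] */

	return r;
}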

/* Note that we don't bit reverse in MAKE_GF128. */
#define	MAKE_GF128(a, b)	((struct gf128){.v = { (a), (b) } })
#define	GF128_EQ(a, b)		((((a).v[0] ^ (b).v[0]) | \
				    ((a).v[1] ^ (b).v[1])) == 0)

/* Load 16 bytes as two big-endian 64-bit words. */
static inline struct gf128
gf128_read(const uint8_t *buf)
{
	struct gf128 r;

	r.v[0] = be64dec(buf);
	buf += sizeof(uint64_t);

	r.v[1] = be64dec(buf);

	return r;
}

/* Store a gf128 value back out as 16 big-endian bytes. */
static inline void
gf128_write(struct gf128 v, uint8_t *buf)
{
	uint64_t tmp;

	be64enc(buf, v.v[0]);
	buf += sizeof tmp;

	be64enc(buf, v.v[1]);
}

/* Addition in GF(2^128) is a bitwise XOR. */
static inline struct gf128 __pure /* XXX - __pure2 instead */
gf128_add(struct gf128 a, struct gf128 b)
{
	a.v[0] ^= b.v[0];
	a.v[1] ^= b.v[1];

	return a;
}

/*
 * Table generation and multiplication.  The *4 variants operate on four
 * blocks at a time using the h, h^2, h^3, h^4 table set.
 */
void gf128_genmultable(struct gf128 h, struct gf128table *t);
void gf128_genmultable4(struct gf128 h, struct gf128table4 *t);
struct gf128 gf128_mul(struct gf128 v, struct gf128table *tbl);
struct gf128 gf128_mul4(struct gf128 a, struct gf128 b, struct gf128 c,
    struct gf128 d, struct gf128table4 *tbl);
struct gf128 gf128_mul4b(struct gf128 r, const uint8_t *v,
    struct gf128table4 *tbl);

#endif /* _GFMULT_H_ */
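
/*
 * Example usage, a sketch that is not part of the original header: a
 * GCM-style caller derives the 16-byte hash subkey H elsewhere, expands it
 * into a multiplication table once with gf128_genmultable(), and then
 * multiplies each 16-byte block by H with gf128_mul().  The names hbuf,
 * inbuf, outbuf and tbl below are illustrative only.
 *
 *	struct gf128table tbl;
 *	struct gf128 h, x;
 *
 *	h = gf128_read(hbuf);		// load the hash subkey H
 *	gf128_genmultable(h, &tbl);	// precompute the table for H
 *
 *	x = gf128_read(inbuf);		// load one input block
 *	x = gf128_mul(x, &tbl);		// x = x * H in GF(2^128)
 *	gf128_write(x, outbuf);		// store the product
 *
 * gf128_genmultable4() and gf128_mul4()/gf128_mul4b() are the four-block
 * variants built on the h, h^2, h^3, h^4 table set described above.
 */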