/* net/sunrpc/auth_gss/gss_krb5_wrap.c */
/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use of distribution of this software
 * without specific, written prior authorization. If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

/*
 * Number of pad octets needed to round @length up to a multiple of
 * @blocksize.  Note this is never zero: an already aligned length gets
 * a full extra block of padding, so the self-describing pad byte is
 * always present.
 */
static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

/*
 * Append self-describing padding (each pad octet holds the pad length)
 * to @buf covering the data after @offset.  The pad bytes go into the
 * tail iovec when the buffer has page or tail data, otherwise into the
 * head.  NOTE(review): assumes the chosen iovec has room for up to
 * blocksize extra bytes -- presumably guaranteed by the wrap caller;
 * confirm.
 */
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

/*
 * Undo gss_krb5_add_padding() after decryption: read the final
 * plaintext octet -- which may live in the head, the pages, or the
 * tail of @buf -- as the pad count, validate it against @blocksize,
 * and shrink buf->len accordingly.  Returns 0 on success or -EINVAL
 * for a malformed pad.
 */
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		/* last byte is in the head: also shrink the head iovec */
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		/* last byte is in the page data: peek at it via kmap */
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferal; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

/*
 * Fill the first @conflen bytes of @p (8 or 16 only; any other length
 * is a BUG) with a confounder drawn from a lazily seeded 64-bit
 * counter.  A 16-byte confounder gets two consecutive counter values.
 */
void
gss_krb5_make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = random32();
		i = (i << 32) | random32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

/*
 * Build an RFC 1964-style wrap token around the cleartext in @buf
 * starting at @offset: pad to the cipher blocksize, make room for and
 * emit the token header, checksum and confounder, then encrypt in
 * place (substituting the scratch @pages for the real page-cache
 * pages).  Returns GSS_S_COMPLETE, GSS_S_CONTEXT_EXPIRED when the
 * context has already expired, or GSS_S_FAILURE on checksum/seqnum/
 * encryption errors.
 */
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
				      .data = cksumdata};
	int blocksize = 0, plainlen;
	unsigned char *ptr, *msg_start;
	s32 now;
	int headlen;
	struct page **tmp_pages;
	u32 seq_send;
	u8 *cksumkey;
	u32 conflen = kctx->gk5e->conflen;

	dprintk("RPC: %s\n", __func__);

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = conflen + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);


	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	/* NOTE(review): SGN_ALG/SEAL_ALG are stored via cpu_to_le16
	 * through a __be16 pointer -- sparse will flag this, but the
	 * little-endian store appears to be the intended wire format;
	 * confirm against RFC 1964 before "fixing". */
	*(__be16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	memset(ptr + 4, 0xff, 4);
	*(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);

	gss_krb5_make_confounder(msg_start, conflen);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	/* Temporarily swap in the scratch pages so the checksum is
	 * computed over the data we are about to encrypt.
	 * NOTE(review): on make_checksum() failure we return without
	 * restoring tmp_pages, leaving buf->pages pointing at the
	 * scratch pages -- confirm callers discard the buffer on error. */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		/* arcfour-hmac derives a per-message key from the
		 * sequence number, so a fresh cipher handle is needed */
		struct crypto_blkcipher *cipher;
		int err;
		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seq_send);

		err = gss_encrypt_xdr_buf(cipher, buf,
					  offset + headlen - conflen, pages);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_FAILURE;
	} else {
		if (gss_encrypt_xdr_buf(kctx->enc, buf,
					offset + headlen - conflen, pages))
			return GSS_S_FAILURE;
	}

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

/*
 * Verify and strip an RFC 1964-style wrap token in place: check the
 * token header, algorithm fields and sequence number, decrypt, verify
 * the checksum, then slide the plaintext back to @offset and remove
 * the pad.  Returns a GSS_S_* status code.
 */
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int signalg;
	int sealalg;
	char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
				      .data = cksumdata};
	s32 now;
	int direction;
	s32 seqnum;
	unsigned char *ptr;
	int bodysize;
	void *data_start, *orig_start;
	int data_len;
	int blocksize;
	u32 conflen = kctx->gk5e->conflen;
	int crypt_offset;
	u8 *cksumkey;

	dprintk("RPC: gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;

	/*
	 * Need plaintext seqnum to derive encryption key for arcfour-hmac
	 */
	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
			     ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;

	/* direction flag must match who the peer is (0 = initiator-sent,
	 * 0xff = acceptor-sent) */
	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		/* per-message key derived from seqnum, as in the wrap path */
		struct crypto_blkcipher *cipher;
		int err;

		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seqnum);

		err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_DEFECTIVE_TOKEN;
	} else {
		if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
			return GSS_S_DEFECTIVE_TOKEN;
	}

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
						kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					conflen;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}

/*
 * We cannot currently handle tokens with rotated data.  We need a
 * generalized routine to rotate the data in place.  It is anticipated
 * that we won't encounter rotated data in the general case.
 */
static u32
rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc)
{
	unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN);

	if (realrrc == 0)
		return 0;

	dprintk("%s: cannot process token with rotated data: "
		"rrc %u, realrrc %u\n", __func__, rrc, realrrc);
	return 1;
}

/*
 * Build an RFC 4121-style (v2) wrap token: prepend the 16-byte token
 * header (flags, EC, RRC, 64-bit sequence number) and hand off to the
 * enctype-specific encrypt_v2 routine.  The "sealed" flag is always
 * set since wrap tokens here always provide confidentiality.
 */
static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	int blocksize;
	u8 *ptr, *plainhdr;
	s32 now;
	u8 flags = 0x00;
	__be16 *be16ptr, ec = 0;
	__be64 *be64ptr;
	u32 err;

	dprintk("RPC: %s\n", __func__);

	if (kctx->gk5e->encrypt_v2 == NULL)
		return GSS_S_FAILURE;

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

	/* construct gss token header */
	ptr = plainhdr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;		/* filler byte */
	be16ptr = (__be16 *)ptr;

	/* NOTE(review): blocksize is computed but unused below -- the
	 * acceptor_enc blocksize presumably informed EC at some point;
	 * verify before removing. */
	blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
	*be16ptr++ = cpu_to_be16(ec);
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = cpu_to_be16(0);

	be64ptr = (__be64 *)be16ptr;
	spin_lock(&krb5_seq_lock);
	*be64ptr = cpu_to_be64(kctx->seq_send64++);
	spin_unlock(&krb5_seq_lock);

	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages);
	if (err)
		return err;

	now = get_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

/*
 * Verify and strip an RFC 4121-style (v2) wrap token: validate the
 * header and flags, decrypt via decrypt_v2, compare the decrypted
 * copy of the token header against the plaintext one, then move the
 * payload back to @offset.  Tokens with a nonzero effective rotation
 * count (RRC) are rejected by rotate_left().
 */
static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	s32 now;
	u64 seqnum;
	u8 *ptr;
	u8 flags = 0x00;
	u16 ec, rrc;
	int err;
	u32 headskip, tailskip;
	u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int movelen;


	dprintk("RPC: %s\n", __func__);

	if (kctx->gk5e->decrypt_v2 == NULL)
		return GSS_S_FAILURE;

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	seqnum = be64_to_cpup((__be64 *)(ptr + 8));

	if (rrc != 0) {
		err = rotate_left(kctx, offset, buf, rrc);
		if (err)
			return GSS_S_FAILURE;
	}

	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
					&headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	/* compare all header bytes except the RRC field (offsets 6-7),
	 * which legitimately differs between outer and inner headers */
	if (memcmp(ptr, decrypted_hdr, 6)
				|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = get_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

	return GSS_S_COMPLETE;
}

/*
 * Mechanism entry point: dispatch on the context's enctype to the v1
 * (DES/DES3/RC4) or v2 (AES) wrap implementation.  An unknown enctype
 * is a BUG -- presumably filtered out at context-import time; verify.
 */
u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
	}
}

/*
 * Mechanism entry point: dispatch on the context's enctype to the v1
 * (DES/DES3/RC4) or v2 (AES) unwrap implementation.  An unknown
 * enctype is a BUG, as in gss_wrap_kerberos().
 */
u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_unwrap_kerberos_v2(kctx, offset, buf);
	}
}