Path: blob/master/net/sunrpc/auth_gss/gss_krb5_crypto.c
/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

u32
krb5_encrypt(
	struct crypto_blkcipher *tfm,
	void * iv,
	void * in,
	void * out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC: krb5_encrypt returns %d\n", ret);
	return ret;
}

u32
krb5_decrypt(
	struct crypto_blkcipher *tfm,
	void * iv,
	void * in,
	void * out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC: gss_k5decrypt returns %d\n", ret);
	return ret;
}

static int
checksummer(struct scatterlist *sg, void *data)
{
	struct hash_desc *desc = data;

	return crypto_hash_update(desc, sg, sg->length);
}

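/*
 * checksummer() is the per-fragment callback handed to xdr_process_buf()
 * throughout this file: every scatterlist fragment of an xdr_buf is folded
 * into a running hash.  A minimal sketch of the calling pattern used below
 * (the hash setup shown here is illustrative, not a separate helper):
 *
 *	struct hash_desc desc = { .tfm = tfm, .flags = CRYPTO_TFM_REQ_MAY_SLEEP };
 *
 *	err = crypto_hash_init(&desc);
 *	if (!err)
 *		err = xdr_process_buf(buf, offset, buf->len - offset,
 *				      checksummer, &desc);
 *	if (!err)
 *		err = crypto_hash_final(&desc, digest);
 */
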
static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
	unsigned int ms_usage;

	switch (usage) {
	case KG_USAGE_SIGN:
		ms_usage = 15;
		break;
	case KG_USAGE_SEAL:
		ms_usage = 13;
		break;
	default:
		return -EINVAL;
	}
	salt[0] = (ms_usage >> 0) & 0xff;
	salt[1] = (ms_usage >> 8) & 0xff;
	salt[2] = (ms_usage >> 16) & 0xff;
	salt[3] = (ms_usage >> 24) & 0xff;

	return 0;
}

static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
		       struct xdr_buf *body, int body_offset, u8 *cksumkey,
		       unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	u8 rc4salt[4];
	struct crypto_hash *md5;
	struct crypto_hash *hmac_md5;

	if (cksumkey == NULL)
		return GSS_S_FAILURE;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
		dprintk("%s: invalid usage value %u\n", __func__, usage);
		return GSS_S_FAILURE;
	}

	md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(md5))
		return GSS_S_FAILURE;

	hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
				     CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac_md5)) {
		crypto_free_hash(md5);
		return GSS_S_FAILURE;
	}

	desc.tfm = md5;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, rc4salt, 4);
	err = crypto_hash_update(&desc, sg, 4);
	if (err)
		goto out;

	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	desc.tfm = hmac_md5;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
	err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
				 checksumdata);
	if (err)
		goto out;

	memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
	cksumout->len = kctx->gk5e->cksumlength;
out:
	crypto_free_hash(md5);
	crypto_free_hash(hmac_md5);
	return err ? GSS_S_FAILURE : 0;
}

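/*
 * In short, the arcfour checksum above is built in two passes over the same
 * hash_desc: first MD5(salt | header | body), where the 4-byte salt is the
 * little-endian Windows usage value (15 for sign, 13 for seal), and then an
 * HMAC-MD5 of that digest keyed with cksumkey, truncated to
 * kctx->gk5e->cksumlength bytes.
 */
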
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	unsigned int checksumlen;

	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
		return make_checksum_hmac_md5(kctx, header, hdrlen,
					      body, body_offset,
					      cksumkey, usage, cksumout);

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	checksumlen = crypto_hash_digestsize(desc.tfm);

	if (cksumkey != NULL) {
		err = crypto_hash_setkey(desc.tfm, cksumkey,
					 kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}

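/*
 * Usage sketch for make_checksum() as a v1 (pre-rfc4121) MIC: checksum the
 * first 8 bytes of the token header followed by the message body.  The
 * caller lives outside this file; the names here (ptr, text, cksumdata)
 * are illustrative only:
 *
 *	struct xdr_netobj md5cksum = { .len = sizeof(cksumdata),
 *				       .data = cksumdata };
 *
 *	if (make_checksum(kctx, ptr, 8, text, 0, cksumkey,
 *			  KG_USAGE_SIGN, &md5cksum))
 *		return GSS_S_FAILURE;
 */
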
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
 * body then over the first 16 octets of the MIC token
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	unsigned int checksumlen;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
				     CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	checksumlen = crypto_hash_digestsize(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		err = crypto_hash_update(&desc, sg, hdrlen);
		if (err)
			goto out;
	}
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}

struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct blkcipher_desc desc;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

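/*
 * encryptor() below is another xdr_process_buf() callback.  It batches up
 * to four scatterlist fragments (head, at most two page crossings, tail),
 * encrypts as many whole cipher blocks as it has accumulated, and carries
 * any partial block over in fragno/fraglen for the next call.  The infrags
 * list may point at separate plaintext pages (desc->pages) while outfrags
 * always points at the xdr_buf's own pages, so encryption can read from
 * the page cache and write into scratch pages in one pass.
 */
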
static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
					  desc->infrags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;

	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	return ret;
}

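/*
 * Sketch of how gss_encrypt_xdr_buf() is typically driven from the v1
 * wrap path (the caller is outside this file; kctx->enc is the context's
 * blkcipher and the other names are illustrative):
 *
 *	if (gss_encrypt_xdr_buf(kctx->enc, buf,
 *				offset + headlen - conflen, pages))
 *		return GSS_S_FAILURE;
 */
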
struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct blkcipher_desc desc;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
					  desc->frags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	struct decryptor_desc desc;

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}

static u32
gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
	u8 data[crypto_blkcipher_blocksize(cipher) * 2];
	struct page **save_pages;
	u32 len = buf->len - offset;

	BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2);

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	if (encrypt)
		ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
	else
		ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	return ret;
}

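/*
 * The AES paths below split the work in two: everything except the last
 * one or two cipher blocks is handled in CBC mode with the aux_cipher
 * (via encryptor()/decryptor()), and gss_krb5_cts_crypt() then covers
 * only the final blocks -- at most two, which is why it can stage them in
 * a small on-stack buffer.  The IV left behind by the CBC pass is fed into
 * the CTS pass so the chaining stays continuous.
 */
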
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, int ec, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;
	struct crypto_blkcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_blkcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
					+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	memset(ecptr, 'X', ec);
	buf->tail[0].iov_len += ec;
	buf->len += ec;

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr + ec, buf->head[0].iov_base + offset,
	       GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.desc.info = desc.iv;
		desc.desc.flags = 0;
		desc.desc.tfm = aux_cipher;

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		if (err)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err) {
		err = GSS_S_FAILURE;
		goto out_err;
	}

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

out_err:
	if (err)
		err = GSS_S_FAILURE;
	return err;
}

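/*
 * Rough layout produced by gss_krb5_aes_encrypt() above (as read from the
 * code, not a restatement of any external spec): after the GSS token
 * header, the ciphertext covers { confounder | plaintext | ec filler bytes
 * | copy of the token header }, and the HMAC computed over the plaintext
 * form is appended at the very end of the tail.
 */
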
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
		     u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_blkcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_blkcipher_blocksize(cipher);

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
			   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
			    kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.desc.info = desc.iv;
		desc.desc.flags = 0;
		desc.desc.tfm = aux_cipher;

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		if (ret)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}

/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
		       unsigned char *cksum)
{
	struct crypto_hash *hmac;
	struct hash_desc desc;
	struct scatterlist sg[1];
	u8 Kseq[GSS_KRB5_MAX_KEYLEN];
	u32 zeroconstant = 0;
	int err;

	dprintk("%s: entered\n", __func__);

	hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc.tfm = hmac;
	desc.flags = 0;

	err = crypto_hash_init(&desc);
	if (err)
		goto out_err;

	/* Compute intermediate Kseq from session key */
	err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_init_table(sg, 1);
	sg_set_buf(sg, &zeroconstant, 4);

	err = crypto_hash_digest(&desc, sg, 4, Kseq);
	if (err)
		goto out_err;

	/* Compute final Kseq from the checksum and intermediate Kseq */
	err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_set_buf(sg, cksum, 8);

	err = crypto_hash_digest(&desc, sg, 8, Kseq);
	if (err)
		goto out_err;

	err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	crypto_free_hash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}

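/*
 * The derivation above, spelled out (HMAC here is the context's checksum
 * hash, normally hmac(md5) for the arcfour enctype):
 *
 *	Kseq = HMAC(Ksess, 0x00000000)		(four zero octets)
 *	Kseq = HMAC(Kseq, cksum[0..7])
 *	crypto_blkcipher_setkey(cipher, Kseq);
 */
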
/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
 */
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
		       s32 seqnum)
{
	struct crypto_hash *hmac;
	struct hash_desc desc;
	struct scatterlist sg[1];
	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
	u8 zeroconstant[4] = {0};
	u8 seqnumarray[4];
	int err, i;

	dprintk("%s: entered, seqnum %u\n", __func__, seqnum);

	hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc.tfm = hmac;
	desc.flags = 0;

	err = crypto_hash_init(&desc);
	if (err)
		goto out_err;

	/* Compute intermediate Kcrypt from session key */
	for (i = 0; i < kctx->gk5e->keylength; i++)
		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

	err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_init_table(sg, 1);
	sg_set_buf(sg, zeroconstant, 4);

	err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
	if (err)
		goto out_err;

	/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
	err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

	sg_set_buf(sg, seqnumarray, 4);

	err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
	if (err)
		goto out_err;

	err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	crypto_free_hash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}

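/*
 * And the companion derivation in krb5_rc4_setup_enc_key() above:
 *
 *	Kcrypt = HMAC(Ksess ^ 0xf0f0f0f0..., 0x00000000)
 *	Kcrypt = HMAC(Kcrypt, seqnum as four big-endian octets)
 *	crypto_blkcipher_setkey(cipher, Kcrypt);
 */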