/* arch/parisc/include/asm/checksum.h -- IP checksum primitives for PA-RISC */
#ifndef _PARISC_CHECKSUM_H
#define _PARISC_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern __wsum csum_partial(const void *, int, __wsum);

/*
 * The same as csum_partial, but copies from src while it checksums.
 *
 * Here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);

/*
 * this is a new version of the above that records errors it finds in *errp,
 * but continues and zeros the rest of the buffer.
 */
extern __wsum csum_partial_copy_from_user(const void __user *src,
		void *dst, int len, __wsum sum, int *errp);

/*
 * Optimized for IP headers, which always checksum on 4 octet boundaries.
 *
 * Written by Randolph Chung <[email protected]>, and then mucked with by
 * LaMont Jones <[email protected]>
 *
 * @iph: pointer to the IP header (must be word-aligned per the comment above)
 * @ihl: header length in 32-bit words
 *
 * Returns the folded, complemented 16-bit header checksum.
 *
 * Strategy: sum the header one 32-bit word at a time with end-around
 * carry (addc), then fold the 32-bit accumulator into 16 bits and
 * complement it ("subi -1, %0, %0" computes -1 - sum == ~sum).
 * NOTE(review): %1 (iph) and %2 (ihl) are "=r" outputs, so the asm is
 * free to consume/advance its register copies (the ",ma" load completer
 * post-modifies the address register); the caller's C variables are
 * re-supplied each call, so this is safe.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum;

	__asm__ __volatile__ (
"	ldws,ma 4(%1), %0\n"		/* sum = first word, advance iph */
"	addib,<= -4, %2, 2f\n"		/* <= 4 words total? skip to fold */
"\n"
"	ldws 4(%1), %%r20\n"
"	ldws 8(%1), %%r21\n"
"	add %0, %%r20, %0\n"
"	ldws,ma 12(%1), %%r19\n"
"	addc %0, %%r21, %0\n"
"	addc %0, %%r19, %0\n"
"1:	ldws,ma 4(%1), %%r19\n"		/* remaining words, one per trip */
"	addib,< 0, %2, 1b\n"
"	addc %0, %%r19, %0\n"
"\n"
"	extru %0, 31, 16, %%r20\n"	/* low 16 bits */
"	extru %0, 15, 16, %%r21\n"	/* high 16 bits */
"	addc %%r20, %%r21, %0\n"	/* fold, keeping pending carry */
"	extru %0, 15, 16, %%r21\n"
"	add %0, %%r21, %0\n"		/* fold again */
"	subi -1, %0, %0\n"		/* complement: -1 - sum == ~sum */
"2:\n"
	: "=r" (sum), "=r" (iph), "=r" (ihl)
	: "1" (iph), "2" (ihl)
	: "r19", "r20", "r21", "memory");

	return (__force __sum16)sum;
}

/*
 * Fold a partial checksum
 *
 * Reduces a 32-bit running sum to a complemented 16-bit checksum.
 * Add the swapped two 16-bit halves of sum: a possible carry from
 * adding the two 16-bit halves will carry from the lower half into
 * the upper half, giving us the correct sum in the upper half.
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;
	sum += (sum << 16) + (sum >> 16);
	return (__force __sum16)(~sum >> 16);
}

/*
 * Accumulate the IPv4 pseudo-header into @sum with end-around carry:
 * sum += daddr + saddr + (proto + len), each addc folding the previous
 * carry, with a final "addc %r0" to absorb the last carry bit.
 * The result is NOT folded or complemented -- feed it to csum_fold()
 * (csum_tcpudp_magic below does exactly that).
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	__asm__(
"	add %1, %0, %0\n"
"	addc %2, %0, %0\n"
"	addc %3, %0, %0\n"
"	addc %%r0, %0, %0\n"	/* absorb final carry */
	: "=r" (sum)
	: "r" (daddr), "r"(saddr), "r"(proto+len), "0"(sum));
	return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buf, int len)
{
	return csum_fold (csum_partial(buf, len, 0));
}


#define _HAVE_ARCH_IPV6_CSUM

/*
 * IPv6 pseudo-header checksum: sums the 16-byte saddr and daddr plus
 * proto+len into @sum with carry propagation, then csum_fold()s the
 * result.
 *
 * Operand numbering in the asm: %0..%3 are the "=r" outputs
 * (sum, saddr, daddr, len); inputs %4..%7 alias them via matching
 * constraints, and %8 is the separate "r" (proto) input -- hence
 * "add %8, %3, %3" computes len += proto.  The ",ma" load completers
 * advance the saddr/daddr register copies through the in6_addr words.
 */
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, unsigned short proto,
					  __wsum sum)
{
	__asm__ __volatile__ (

#if BITS_PER_LONG > 32

	/*
	** We can execute two loads and two adds per cycle on PA 8000.
	** But add insn's get serialized waiting for the carry bit.
	** Try to keep 4 registers with "live" values ahead of the ALU.
	*/

"	ldd,ma 8(%1), %%r19\n"	/* get 1st saddr word */
"	ldd,ma 8(%2), %%r20\n"	/* get 1st daddr word */
"	add %8, %3, %3\n"	/* add 16-bit proto + len */
"	add %%r19, %0, %0\n"
"	ldd,ma 8(%1), %%r21\n"	/* 2cd saddr */
"	ldd,ma 8(%2), %%r22\n"	/* 2cd daddr */
"	add,dc %%r20, %0, %0\n"
"	add,dc %%r21, %0, %0\n"
"	add,dc %%r22, %0, %0\n"
"	add,dc %3, %0, %0\n"	/* fold in proto+len | carry bit */
"	extrd,u %0, 31, 32, %%r19\n"	/* copy upper half down */
"	depdi 0, 31, 32, %0\n"	/* clear upper half */
"	add %%r19, %0, %0\n"	/* fold into 32-bits */
"	addc 0, %0, %0\n"	/* add carry */

#else

	/*
	** For PA 1.x, the insn order doesn't matter as much.
	** Insn stream is serialized on the carry bit here too.
	** result from the previous operation (eg r0 + x)
	*/

"	ldw,ma 4(%1), %%r19\n"	/* get 1st saddr word */
"	ldw,ma 4(%2), %%r20\n"	/* get 1st daddr word */
"	add %8, %3, %3\n"	/* add 16-bit proto + len */
"	add %%r19, %0, %0\n"
"	ldw,ma 4(%1), %%r21\n"	/* 2cd saddr */
"	addc %%r20, %0, %0\n"
"	ldw,ma 4(%2), %%r22\n"	/* 2cd daddr */
"	addc %%r21, %0, %0\n"
"	ldw,ma 4(%1), %%r19\n"	/* 3rd saddr */
"	addc %%r22, %0, %0\n"
"	ldw,ma 4(%2), %%r20\n"	/* 3rd daddr */
"	addc %%r19, %0, %0\n"
"	ldw,ma 4(%1), %%r21\n"	/* 4th saddr */
"	addc %%r20, %0, %0\n"
"	ldw,ma 4(%2), %%r22\n"	/* 4th daddr */
"	addc %%r21, %0, %0\n"
"	addc %%r22, %0, %0\n"
"	addc %3, %0, %0\n"	/* fold in proto+len, catch carry */

#endif
	: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
	: "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
	: "r19", "r20", "r21", "r22", "memory");
	return csum_fold(sum);
}

/*
 * Copy and checksum to user
 *
 * Checksums @src first, then copies it to user space, so @src is read
 * twice.  On a faulting copy this stores -EFAULT in *err_ptr and
 * returns an all-ones __wsum; note @dst may then be partially written.
 */
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
						void __user *dst,
						int len, __wsum sum,
						int *err_ptr)
{
	/* code stolen from include/asm-mips64 */
	sum = csum_partial(src, len, sum);

	if (copy_to_user(dst, src, len)) {
		*err_ptr = -EFAULT;
		return (__force __wsum)-1;
	}

	return sum;
}

#endif