/* $Id: checksum.h,v 1.33 2002/02/01 22:01:05 davem Exp $ */
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H

/*  checksum.h:  IP/UDP/TCP checksum routines on the Sparc.
 *
 *  Copyright (C) 1995 Linus Torvalds
 *  Copyright (C) 1995 Miguel de Icaza
 *  Copyright (C) 1996 David S. Miller
 *  Copyright (C) 1996 Eddie C. Dost
 *  Copyright (C) 1997 Jakub Jelinek
 *
 * derived from:
 *	Alpha checksum c-code
 *      ix86 inline assembly
 *      RFC1071 Computing the Internet Checksum
 */
 
#include <linux/in6.h>
#include <asm/uaccess.h>

/* computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
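
/* Illustrative only (not part of this header): a caller feeds each
 * fragment into the running sum and folds once at the end, e.g.
 *
 *	unsigned int sum = csum_partial(frag1, len1, 0);
 *	sum = csum_partial(frag2, len2, sum);
 *	unsigned short csum16 = csum_fold(sum);
 *
 * frag1/frag2/len1/len2 are hypothetical buffers and lengths.
 */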

/* the same as csum_partial, but copies from src while it
 * checksums
 *
 * here it is even more important to align src and dst on a 32-bit (or
 * even better, a 64-bit) boundary
 */

extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);

static inline unsigned int 
csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst, int len,
			   unsigned int sum)
{
	register unsigned int ret asm("o0") = (unsigned int)src;
	register char *d asm("o1") = dst;
	register int l asm("g1") = len;

	/* The helper expects %o0 = src, %o1 = dst, %g1 = len (pinned
	 * above); the call's delay slot moves the incoming sum into %g7.
	 */
	__asm__ __volatile__ (
		"call __csum_partial_copy_sparc_generic\n\t"
		" mov %6, %%g7\n"
	: "=&r" (ret), "=&r" (d), "=&r" (l)
	: "0" (ret), "1" (d), "2" (l), "r" (sum)
	: "o2", "o3", "o4", "o5", "o7",
	  "g2", "g3", "g4", "g5", "g7",
	  "memory", "cc");
	return ret;
}

static inline unsigned int 
csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst, int len,
			    unsigned int sum, int *err)
{
	if (!access_ok (VERIFY_READ, src, len)) {
		*err = -EFAULT;
		memset (dst, 0, len);
		return sum;
	} else {
		register unsigned long ret asm("o0") = (unsigned long)src;
		register char *d asm("o1") = dst;
		register int l asm("g1") = len;
		register unsigned int s asm("g7") = sum;

		/* The __ex_table entry lets a faulting user-space load in
		 * __csum_partial_copy_sparc_generic be fixed up instead of
		 * oopsing; the err pointer is stashed at [%sp + 64] (just
		 * past the 16-word register window save area) so the fixup
		 * path can report -EFAULT through it.
		 */
		__asm__ __volatile__ (
		".section __ex_table,#alloc\n\t"
		".align 4\n\t"
		".word 1f,2\n\t"
		".previous\n"
		"1:\n\t"
		"call __csum_partial_copy_sparc_generic\n\t"
		" st %8, [%%sp + 64]\n"
		: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
		: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
		: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
		  "cc", "memory");
		return ret;
	}
}
  
static inline unsigned int 
csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst, int len,
			  unsigned int sum, int *err)
{
	if (!access_ok (VERIFY_WRITE, dst, len)) {
		*err = -EFAULT;
		return sum;
	} else {
		register unsigned long ret asm("o0") = (unsigned long)src;
		register char __user *d asm("o1") = dst;
		register int l asm("g1") = len;
		register unsigned int s asm("g7") = sum;

		/* Same convention as the from_user case: faults while
		 * storing to user space are recovered via the __ex_table
		 * entry, with the err pointer again parked at [%sp + 64].
		 */
		__asm__ __volatile__ (
		".section __ex_table,#alloc\n\t"
		".align 4\n\t"
		".word 1f,1\n\t"
		".previous\n"
		"1:\n\t"
		"call __csum_partial_copy_sparc_generic\n\t"
		" st %8, [%%sp + 64]\n"
		: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
		: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
		: "o2", "o3", "o4", "o5", "o7",
		  "g2", "g3", "g4", "g5",
		  "cc", "memory");
		return ret;
	}
}

#define HAVE_CSUM_COPY_USER
#define csum_and_copy_to_user csum_partial_copy_to_user

/* ihl is always 5 or greater, and almost always exactly 5; iph is
 * word aligned the majority of the time.
 */
static inline unsigned short ip_fast_csum(const unsigned char *iph,
					  unsigned int ihl)
{
	unsigned short sum;

	/* Note: We must read %2 before we touch %0 for the first time,
	 *       because GCC can legitimately use the same register for
	 *       both operands.
	 */
	__asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
			     "ld\t[%1 + 0x00], %0\n\t"
			     "ld\t[%1 + 0x04], %%g2\n\t"
			     "ld\t[%1 + 0x08], %%g3\n\t"
			     "addcc\t%%g2, %0, %0\n\t"
			     "addxcc\t%%g3, %0, %0\n\t"
			     "ld\t[%1 + 0x0c], %%g2\n\t"
			     "ld\t[%1 + 0x10], %%g3\n\t"
			     "addxcc\t%%g2, %0, %0\n\t"
			     "addx\t%0, %%g0, %0\n"
			     "1:\taddcc\t%%g3, %0, %0\n\t"
			     "add\t%1, 4, %1\n\t"
			     "addxcc\t%0, %%g0, %0\n\t"
			     "subcc\t%%g4, 1, %%g4\n\t"
			     "be,a\t2f\n\t"
			     "sll\t%0, 16, %%g2\n\t"
			     "b\t1b\n\t"
			     "ld\t[%1 + 0x10], %%g3\n"
			     "2:\taddcc\t%0, %%g2, %%g2\n\t"
			     "srl\t%%g2, 16, %0\n\t"
			     "addx\t%0, %%g0, %0\n\t"
			     "xnor\t%%g0, %0, %0"
			     : "=r" (sum), "=&r" (iph)
			     : "r" (ihl), "1" (iph)
			     : "g2", "g3", "g4", "cc");
	return sum;
}
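
/* Illustrative only: a receive path would verify a header with
 *
 *	if (ip_fast_csum(iph, iph->ihl) != 0)
 *		goto drop;
 *
 * (drop being a hypothetical label) since a header whose checksum
 * field is correct sums to 0xffff and thus folds, complemented, to 0.
 */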

/* Fold a partial checksum without adding pseudo headers. */
static inline unsigned int csum_fold(unsigned int sum)
{
	unsigned int tmp;

	__asm__ __volatile__("addcc\t%0, %1, %1\n\t"
			     "srl\t%1, 16, %1\n\t"
			     "addx\t%1, %%g0, %1\n\t"
			     "xnor\t%%g0, %1, %0"
			     : "=&r" (sum), "=r" (tmp)
			     : "0" (sum), "1" (sum<<16)
			     : "cc");
	return sum;
}
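
/* Worked example (illustrative): folding 0x12345678 adds the 16-bit
 * halves 0x1234 + 0x5678 = 0x68ac (any carry is wrapped back in), then
 * complements, leaving 0x9753 in the low 16 bits.
 */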

static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
					       unsigned long daddr,
					       unsigned int len,
					       unsigned short proto,
					       unsigned int sum)
{
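	/* Accumulate the pseudo-header in ones' complement arithmetic:
	 * saddr + daddr + ((proto << 16) + len), wrapping the final carry
	 * back into the low bits; csum_fold() is left to the caller.
	 */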
	__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
			     "addxcc\t%2, %0, %0\n\t"
			     "addxcc\t%3, %0, %0\n\t"
			     "addx\t%0, %%g0, %0\n\t"
			     : "=r" (sum), "=r" (saddr)
			     : "r" (daddr), "r" ((proto<<16)+len), "0" (sum),
			       "1" (saddr)
			     : "cc");
	return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
						   unsigned long daddr,
						   unsigned short len,
						   unsigned short proto,
						   unsigned int sum) 
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
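
/* Illustrative use (not from this header): a TCP transmit path would
 * typically do
 *
 *	th->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP,
 *				      csum_partial((unsigned char *)th,
 *						   len, 0));
 *
 * where th is a hypothetical struct tcphdr pointer and len covers the
 * TCP header plus payload.
 */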

#define _HAVE_ARCH_IPV6_CSUM

static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
						 struct in6_addr *daddr,
						 __u32 len,
						 unsigned short proto,
						 unsigned int sum) 
{
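	/* Sum len, proto and the incoming sum first, then both 128-bit
	 * addresses a word at a time, letting addxcc carry between the
	 * adds; the final fold is done below via csum_fold().
	 */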
	__asm__ __volatile__ (
		"addcc	%3, %4, %%g4\n\t"
		"addxcc	%5, %%g4, %%g4\n\t"
		"ld	[%2 + 0x0c], %%g2\n\t"
		"ld	[%2 + 0x08], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"ld	[%2 + 0x04], %%g2\n\t"
		"addxcc	%%g3, %%g4, %%g4\n\t"
		"ld	[%2 + 0x00], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"ld	[%1 + 0x0c], %%g2\n\t"
		"addxcc	%%g3, %%g4, %%g4\n\t"
		"ld	[%1 + 0x08], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"ld	[%1 + 0x04], %%g2\n\t"
		"addxcc	%%g3, %%g4, %%g4\n\t"
		"ld	[%1 + 0x00], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"addxcc	%%g3, %%g4, %0\n\t"
		"addx	0, %0, %0\n"
		: "=&r" (sum)
		: "r" (saddr), "r" (daddr), 
		  "r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
		: "g2", "g3", "g4", "cc");

	return csum_fold(sum);
}

/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline unsigned short ip_compute_csum(unsigned char *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
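
/* Illustrative only: e.g. verifying a received ICMP message,
 *
 *	if (ip_compute_csum((unsigned char *)icmph, icmp_len) != 0)
 *		... bad checksum ...
 *
 * icmph and icmp_len are hypothetical.
 */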

#endif /* !(__SPARC_CHECKSUM_H) */