sima/autoconf/
sima/hosts/i386/
sima/mudlib/
sima/mudlib/kernel/
sima/mudlib/obj/
sima/mudlib/sys/
sima/synhash/mips/
.data
_T:
	.byte   1,  87,  49,  12, 176, 178, 102, 166
	.byte 121, 193,   6,  84, 249, 230,  44, 163
	.byte  14, 197, 213, 181, 161,  85, 218,  80
	.byte  64, 239,  24, 226, 236, 142,  38, 200
	.byte 110, 177, 104, 103, 141, 253, 255,  50
	.byte  77, 101,  81,  18,  45,  96,  31, 222
	.byte  25, 107, 190,  70,  86, 237, 240,  34
	.byte  72, 242,  20, 214, 244, 227, 149, 235
	.byte  97, 234,  57,  22,  60, 250,  82, 175
	.byte 208,   5, 127, 199, 111,  62, 135, 248
	.byte 174, 169, 211,  58,  66, 154, 106, 195
	.byte 245, 171,  17, 187, 182, 179,   0, 243
	.byte 132,  56, 148,  75, 128, 133, 158, 100
	.byte 130, 126,  91,  13, 153, 246, 216, 219
	.byte 119,  68, 223,  78,  83,  88, 201,  99
	.byte 122,  11,  92,  32, 136, 114,  52,  10
	.byte 138,  30,  48, 183, 156,  35,  61,  26
	.byte 143,  74, 251,  94, 129, 162,  63, 152
	.byte 170,   7, 115, 167, 241, 206,   3, 150
	.byte  55,  59, 151, 220,  90,  53,  23, 131
	.byte 125, 173,  15, 238,  79,  95,  89,  16
	.byte 105, 137, 225, 224, 217, 160,  37, 123
	.byte 118,  73,   2, 157,  46, 116,   9, 145
	.byte 134, 228, 207, 212, 202, 215,  69, 229
	.byte  27, 188,  67, 124, 168, 252,  42,   4
	.byte  29, 108,  21, 247,  19, 205,  39, 203
	.byte 233,  40, 186, 147, 198, 192, 155,  33
	.byte 164, 191,  98, 204, 165, 180, 117,  76
	.byte 140,  36, 210, 172,  41,  54, 159,   8
	.byte 185, 232, 113, 196, 231,  47, 146, 120
	.byte  51,  65,  28, 144, 254, 221,  93, 189
	.byte 194, 139, 112,  43,  71, 109, 184, 209
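
/*
 * _T is a 256-byte table -- apparently a permutation of 0..255 -- used
 * Pearson-style: a hash byte is xored with input and driven through
 * the table.  A minimal C sketch of the basic step, with hypothetical
 * names (T for the table, pearson_step for the helper; neither is
 * exported by this file):
 *
 *	extern unsigned char T[256];	// the permutation above
 *
 *	unsigned char pearson_step(unsigned char h, unsigned char c)
 *	{
 *		return T[h ^ c];	// one lookup mixes in one byte
 *	}
 */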

.text

/*
 * Take a 4-byte aligned string, zero-pad its final word in place, and
 * hash it.  The caller is expected to test for zero length.
 */
.globl _aphash
	.align 4
_aphash:
	movl 4(%esp),%edx /* addr */
	movl 8(%esp),%ecx /* length */
	subl $4,%ecx
	movl %ecx,%eax
	jle L20
	subl $125,%eax
	jl L21

	addl %edx,%ecx
	andb $252,%al	/* round the skip down to a multiple of 4 */
	addl %eax,%edx
	jmp L22
	.align 2,0x90
L21:
	addl %edx,%ecx
	testb $4,%al
	je L22
	xorl (%edx),%eax
	roll $1,%eax
	addl $4,%edx
	cmpl %ecx,%edx
	jae L23
	.align 2,0x90
L22:
	xorl (%edx),%eax
	roll $1,%eax
	xorl 4(%edx),%eax
	roll $1,%eax
	addl $8,%edx
	cmpl %ecx,%edx
	jb L22
L23:
	subl %edx,%ecx
L20:
	pushl %ebx
	negl %ecx
	shlb $3,%cl
	movl (%edx),%ebx
	shll %cl,%ebx
	xorl %ebx,%eax	/* mix the padded final word into the hash */
	shrl %cl,%ebx
	movl %ebx,(%edx)	/* pad string in memory to allow easy compare */
	popl %ebx

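	/* fold the 32 accumulated bits through _T into a 16-bit hash in %ax */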
	movb %al,%cl
	movb _T(%ecx),%cl
	xorb %ah,%cl
	shrl $16,%eax
	xorb _T(%ecx),%ah
	movb %ah,%cl
	xorb _T(%ecx),%al

	ret
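
/*
 * What the padding store above does, as a hedged C sketch (little
 * endian, as on the i386; pad_tail is a hypothetical name): the bytes
 * of the final word beyond the string's length are forced to zero in
 * memory, so hashed strings can later be compared word by word.
 *
 *	void pad_tail(unsigned int *last, int valid)	// 1..4 valid bytes
 *	{
 *		if (valid < 4)
 *			*last &= (1u << (valid * 8)) - 1;	// clear garbage bytes
 *	}
 */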

.globl _hash
	.align 4
_hash:
	movl 4(%esp),%edx /* addr */
	movl 8(%esp),%ecx /* length */
	subl $4,%ecx
	movl %ecx,%eax
	jle L30
	subl $125,%eax
	jg L31
	addl %edx,%ecx
	testb $4,%al
	je L32
	xorl (%edx),%eax
	roll $1,%eax
	addl $4,%edx
	cmpl %ecx,%edx
	jae L33
	.align 2,0x90
L32:
	xorl (%edx),%eax
	roll $1,%eax
	xorl 4(%edx),%eax
	roll $1,%eax
	addl $8,%edx
	cmpl %ecx,%edx
	jb L32
L33:
	subl %edx,%ecx
L30:
	negl %ecx
	shlb $3,%cl
	movl (%edx),%edx
	shll %cl,%edx
	xorl %edx,%eax

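	/* fold to 16 bits through _T, result in %ax */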
	movb %al,%cl
	movb _T(%ecx),%cl
	xorb %ah,%cl
	shrl $16,%eax
	xorb _T(%ecx),%ah
	movb %ah,%cl
	xorb _T(%ecx),%al

	ret

	.align 2,0x90
L31:
	addl %edx,%ecx
	andb $252,%al
	addl %eax,%edx
	jmp L32
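
/*
 * A hedged C sketch of _hash on the short-string path (hash_sketch and
 * T are hypothetical names).  The long-string branch L31 above is
 * omitted; it skips ahead so that only the last 129 bytes or so are
 * hashed, seeding the hash with the number of bytes skipped.
 *
 *	extern unsigned char T[256];
 *
 *	unsigned short hash_sketch(const unsigned int *p, int len)
 *	{
 *		unsigned int h = 0;
 *		unsigned char c, hi, lo;
 *		int rem = len;			// caller guarantees len >= 1
 *
 *		while (rem > 4) {		// whole 32-bit words
 *			h ^= *p++;
 *			h = h << 1 | h >> 31;	// roll $1
 *			rem -= 4;
 *		}
 *		h ^= *p << ((4 - rem) * 8);	// tail word, garbage shifted out
 *		c  = T[h & 0xff] ^ (h >> 8 & 0xff);
 *		hi = (h >> 24) ^ T[c];		// fold 32 bits into 16,
 *		lo = (h >> 16 & 0xff) ^ T[hi];	// as returned in %ax
 *		return (unsigned short)(hi << 8 | lo);
 *	}
 */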


	.align 4,0x90
small:
	movl %ecx,%eax
	incl %eax
	jns L30
	subl $-2,%eax
	je len1
	js len0
	movzbl (%ebp),%eax	/* zero-extend so the full %eax can index _T */
	incl %ebp
	movb _T(%eax),%al
len1:
	movb (%ebp),%ah
	xorb %ah,%al
len0:
	popl %ebp
	ret


	movl %edx,%ecx
	andb $3,%cl
	shlb $3,%cl
	movl -4(%ebp,%edx),%eax
	shll %cl,%eax
	decl %edx
	sarl $2,%edx
	jc L1
	je L2
	subl $4,%ecx
	.align 4,0x90
L3:
	roll $1,%eax
	xorl -4(%ecx),%eax
L1:
	roll $1,%eax
	xorl (%ecx),%eax
	addl $8,%ecx
	decl %edx
	jne L3
L2:
	movb %al,%dl
	movb _T(%edx),%dl
	xorb %ah,%dl
	shrl $16,%eax
	xorb _T(%edx),%ah
	movb %ah,%dl
	xorb _T(%edx),%al

	popl %ebp
	ret




	.align 4,0x90
large:
/*
 *	%ebp : addr (old value pushed)
 *	%edx : length
 */
	pushl %edi
	pushl %esi
	pushl %ebx
	movl (%ebp),%ecx
	movl 4(%ebp),%ebx
	movl 8(%ebp),%esi
	movl 12(%ebp),%edi
	cmpl $127,%edx
	jle L3
	sarl $3,%edx
	andb $252,%dl	/* %edx : offset */
	movl $7,%eax	/* %eax : count */
	addl %edx,%ebp
	jmp L4
	.align 4,0x90
L3:
	addl $-4,%edx
	movl %edx,%eax
	sarl $4,%eax	/* eax : count */
	andl $12,%edx
	leal 4(%edx,%ebp),%ebp
	movl $16,%edx	/* %edx : offset */
	.align 2,0x90
L4:
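	/* rotate the 128-bit state %edi:%esi:%ebx:%ecx left by one bit,
	   then xor in the next 16-byte block */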
	addl %ecx,%ecx
	adcl %ebx,%ebx
	adcl %esi,%esi
	adcl %edi,%edi
	adcl $0,%ecx
	xorl (%ebp),%ecx
	xorl 4(%ebp),%ebx
	xorl 8(%ebp),%esi
	xorl 12(%ebp),%edi
	addl %edx,%ebp
	decl %eax
	jne L4

	/* benchmarks showed that shifts are faster than xchgb on a 486dx2 */
	xorl %edx,%edx
	movl $4,%ebp
L5:
	xorb %cl,%al
	movb _T(%eax),%al
	rorl $8,%ecx
	xorb %bl,%dl
	movb _T(%edx),%dl
	rorl $8,%ebx
	/* %esi and %edi have no byte subregisters on the i386, so swap
	   them with %ecx/%ebx to reach their low bytes */
	xchgl %esi,%ecx
	xchgl %edi,%ebx
	xorb %cl,%al
	movb _T(%eax),%al
	rorl $8,%ecx
	xorb %bl,%dl
	movb _T(%edx),%dl
	rorl $8,%ebx
	xchgl %esi,%ecx
	xchgl %edi,%ebx
	decl %ebp
	jne L5

	movb %dl,%ah

	popl %ebx
	popl %esi
	popl %edi
	popl %ebp
	ret
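
/*
 * A hedged C sketch of the large-string path (large_sketch is a
 * hypothetical name; the real entry conditions live in the missing
 * dispatch code, which must guarantee at least one 16-byte block
 * beyond the first).  It shows the technique: seed a 128-bit state
 * with the first 16 bytes, repeatedly rotate the state left by one
 * bit and xor in a sampled 16-byte block, then fold the state through
 * the table into a 16-bit hash.
 *
 *	#include <string.h>
 *
 *	extern unsigned char T[256];
 *
 *	unsigned short large_sketch(const char *addr, int len)
 *	{
 *		const char *p = addr;
 *		unsigned int w[4], v, t;
 *		unsigned char h1 = 0, h2 = 0;
 *		int count, stride, i;
 *
 *		memcpy(w, p, 16);		// first block seeds the state
 *		if (len > 127) {
 *			stride = (len >> 3) & ~3;	// 7 blocks spread over the string
 *			count = 7;
 *			p += stride;
 *		} else {
 *			count = (len - 4) >> 4;		// adjacent blocks to the tail
 *			p += 4 + ((len - 4) & 12);
 *			stride = 16;
 *		}
 *		do {
 *			t = w[3] >> 31;		// 128-bit rotate left by 1
 *			w[3] = w[3] << 1 | w[2] >> 31;
 *			w[2] = w[2] << 1 | w[1] >> 31;
 *			w[1] = w[1] << 1 | w[0] >> 31;
 *			w[0] = w[0] << 1 | t;
 *			for (i = 0; i < 4; i++) {	// xor in the block
 *				memcpy(&v, p + 4 * i, 4);
 *				w[i] ^= v;
 *			}
 *			p += stride;
 *		} while (--count != 0);
 *		for (i = 0; i < 4; i++) {	// fold 128 bits to 16
 *			h1 = T[h1 ^ (w[0] & 0xff)]; w[0] >>= 8;
 *			h2 = T[h2 ^ (w[1] & 0xff)]; w[1] >>= 8;
 *			h1 = T[h1 ^ (w[2] & 0xff)]; w[2] >>= 8;
 *			h2 = T[h2 ^ (w[3] & 0xff)]; w[3] >>= 8;
 *		}
 *		return (unsigned short)(h2 << 8 | h1);
 *	}
 */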