sima/autoconf/
sima/hosts/i386/
sima/mudlib/
sima/mudlib/kernel/
sima/mudlib/obj/
sima/mudlib/sys/
sima/synhash/mips/
.data
/*
 * _T: 256-byte substitution table used by _hash below.  Each input
 * byte b is folded into a running 8-bit hash h as h = T[h ^ b].
 * The values appear to be the 0..255 permutation from Pearson,
 * "Fast Hashing of Variable-Length Text Strings" (CACM 33(6), 1990)
 * -- TODO(review): confirm against the reference table.  Do not edit:
 * any change alters every hash value produced.
 */
_T:
	.byte   1,  87,  49,  12, 176, 178, 102, 166
	.byte 121, 193,   6,  84, 249, 230,  44, 163
	.byte  14, 197, 213, 181, 161,  85, 218,  80
	.byte  64, 239,  24, 226, 236, 142,  38, 200
	.byte 110, 177, 104, 103, 141, 253, 255,  50
	.byte  77, 101,  81,  18,  45,  96,  31, 222
	.byte  25, 107, 190,  70,  86, 237, 240,  34
	.byte  72, 242,  20, 214, 244, 227, 149, 235
	.byte  97, 234,  57,  22,  60, 250,  82, 175
	.byte 208,   5, 127, 199, 111,  62, 135, 248
	.byte 174, 169, 211,  58,  66, 154, 106, 195
	.byte 245, 171,  17, 187, 182, 179,   0, 243
	.byte 132,  56, 148,  75, 128, 133, 158, 100
	.byte 130, 126,  91,  13, 153, 246, 216, 219
	.byte 119,  68, 223,  78,  83,  88, 201,  99
	.byte 122,  11,  92,  32, 136, 114,  52,  10
	.byte 138,  30,  48, 183, 156,  35,  61,  26
	.byte 143,  74, 251,  94, 129, 162,  63, 152
	.byte 170,   7, 115, 167, 241, 206,   3, 150
	.byte  55,  59, 151, 220,  90,  53,  23, 131
	.byte 125, 173,  15, 238,  79,  95,  89,  16
	.byte 105, 137, 225, 224, 217, 160,  37, 123
	.byte 118,  73,   2, 157,  46, 116,   9, 145
	.byte 134, 228, 207, 212, 202, 215,  69, 229
	.byte  27, 188,  67, 124, 168, 252,  42,   4
	.byte  29, 108,  21, 247,  19, 205,  39, 203
	.byte 233,  40, 186, 147, 198, 192, 155,  33
	.byte 164, 191,  98, 204, 165, 180, 117,  76
	.byte 140,  36, 210, 172,  41,  54, 159,   8
	.byte 185, 232, 113, 196, 231,  47, 146, 120
	.byte  51,  65,  28, 144, 254, 221,  93, 189
	.byte 194, 139, 112,  43,  71, 109, 184, 209

.text
	.align 4
/*
 * _hash: hash a byte string, i386, AT&T syntax, cdecl.
 *
 * C-equivalent signature (presumably): unsigned hash(char *buf, int len)
 *   -- TODO(review): confirm prototype against the caller.
 * After the initial push: 8(%esp) = buf, 12(%esp) = len.
 * Returns a 16-bit value in %ax (%ah:%al are two interleaved
 * Pearson-style byte hashes); the upper half of %eax is zero.
 * Clobbers only cdecl caller-saved registers (%eax, %ecx, %edx, flags).
 *
 * Strings longer than 31 bytes are handled by the word-at-a-time
 * algorithm at L1 below; this block is the short-string path.
 */
.globl _hash
_hash:
	pushl %ebp
	movl 8(%esp),%ecx		/* %ecx = buf (cursor) */
	movl 12(%esp),%ebp		/* %ebp = len (counter) */
	cmpl $31,%ebp
	jg L1				/* long strings: word-wise path */
	xorl %eax,%eax			/* %al = accumulator 1; keeps %eax zero-extended for _T(%eax) indexing */
	xorl %edx,%edx			/* %dl = accumulator 2; same for _T(%edx) */
	jmp L2
	.align 4,0x90
L3:	/* fold 4 input bytes per iteration: h = T[h ^ byte] */
	xorb (%ecx),%al
	movb _T(%eax),%al
	xorb 1(%ecx),%dl
	movb _T(%edx),%dl
	xorb 2(%ecx),%al
	movb _T(%eax),%al
	xorb 3(%ecx),%al		/* NOTE(review): byte 3 feeds %al, not %dl -- asymmetric vs. bytes 0-2; presumably matches the reference implementation, confirm before changing */
	movb _T(%eax),%al
	addl $4,%ecx
L2:
	addl $-4,%ebp
	jns L3				/* loop while >= 4 bytes remain */

	/* Here %ebp = (len % 4) - 4, i.e. in [-4,-1].  Dispatch on it to
	 * consume the 0..3 trailing bytes: the jump table is placed
	 * directly BELOW L4, so L4(,%ebp,4) indexes it with negative %ebp
	 * (%ebp = -4 -> first entry, -1 -> last entry). */
#if 0 /* doesn't work, although I can't tell why */
	notl %ebp
	jmp L4(%ebp,%ebp,8)
#else
	jmp *L4(,%ebp,4)
	.align 4,0x90
	.long L4_3			/* %ebp = -4: 0 trailing bytes */
	.long L4_2			/* %ebp = -3: 1 trailing byte  */
	.long L4_1			/* %ebp = -2: 2 trailing bytes */
	.long L4_0			/* %ebp = -1: 3 trailing bytes */
#endif
L4:
L4_0:	/* trailing bytes fall through: 3 -> 2 -> 1 -> done */
	xorb (%ecx),%al
	movb _T(%eax),%al
	incl %ecx
L4_1:
	xorb (%ecx),%dl
	movb _T(%edx),%dl
	incl %ecx
L4_2:
	xorb (%ecx),%al
	movb _T(%eax),%al
	nop
L4_3:

	movb %dl,%ah			/* result: %ah:%al = the two accumulators */

	popl %ebp
	ret

	.align 4,0x90
/*
 * L1: long-string path of _hash (len > 31), reached from above with
 * %ecx = buf and %ebp = len.  A 128-bit state in %edx:%ebx:%esi:%edi
 * is rotated left one bit and xored with four buffer words per
 * iteration; for very long strings the buffer is sampled with a
 * stride so the iteration count stays bounded.  The state is finally
 * condensed through the _T table into the same %ah:%al format the
 * short path returns.
 *
 * Stack layout once all pushes are done:
 *   (%esp)   = stride (bytes to advance per iteration)
 *   4(%esp)  = mask   (selects bytes at/after buf in a boundary word)
 *   8(%esp)  = shift  (buf misalignment in bits)
 *   32(%esp) = len argument
 */
L1:
	pushl %edi
	pushl %esi
	pushl %ebx
	movl %ecx,%eax
	andl $3,%ecx			/* %ecx = buf misalignment, 0..3 */
	andb $252,%al			/* %eax = buf rounded down to a word boundary */
	sall $3,%ecx			/* misalignment in bits: 0/8/16/24 */
	movl $-1,%ebp
	pushl %ecx	/* variable push: shift */
	sall %cl,%ebp			/* mask = -1 << shift */
	pushl %ebp	/* variable push: mask */
	movl (%eax),%edx		/* preload 4 words of state */
	movl 4(%eax),%ebx
	movl 8(%eax),%esi
	movl 12(%eax),%edi
	leal 16(%eax),%eax
	xorl (%eax),%edx		/* blend the next word into %edx through */
	andl %ebp,%edx			/* the mask, so the bytes that precede */
	xorl (%eax),%edx		/* buf in the first word drop out */
	movl 32(%esp),%ecx		/* %ecx = len */
	cmpl $127,%ecx
	jle L5
	sarl $3,%ecx			/* len > 127: stride = (len/8) & ~3, */
	andb $252,%cl			/* fixed 7 iterations sample the buffer */
	movl $7,%ebp
	addl %ecx,%eax
	pushl %ecx	/* variable push: offset */
	jmp L6
	.align 4,0x90
L5:
	addl $-4,%ecx			/* len <= 127: stride 16, */
	movl %ecx,%ebp			/* (len-4)/16 iterations */
	sarl $4,%ebp
	andl $12,%ecx
	leal 4(%ecx,%eax),%eax
	pushl $16
	.align 2,0x90
L6:
	addl %edx,%edx			/* rotate the 128-bit state */
	adcl %ebx,%ebx			/* %edi:%esi:%ebx:%edx left by one bit, */
	adcl %esi,%esi
	adcl %edi,%edi
	adcl $0,%edx			/* wrapping the top carry around */
	movl -16(%eax),%ecx
	xorl -12(%eax),%ebx		/* xor in the next 4 buffer words; */
	xorl -8(%eax),%esi
	xorl -4(%eax),%edi
	xorl (%eax),%ecx		/* word 0 is blended with its successor */
	andl 4(%esp),%ecx		/* through the alignment mask, as for */
	xorl (%eax),%ecx		/* the very first word above */
	xorl %ecx,%edx
	addl (%esp),%eax		/* advance by the stride */
	decl %ebp
	jne L6

	movl %edx,%eax
	movb 8(%esp),%cl		/* BUGFIX: was "movl 8(%esp),%cl" -- l suffix with an 8-bit register does not assemble */
	shrd %cl,%ebx,%edx		/* rotate the whole 128-bit state right */
	shrd %cl,%esi,%ebx		/* by the misalignment, so the same */
	shrd %cl,%edi,%esi		/* string hashes equally at any */
	shrd %cl,%eax,%edi		/* alignment */

	/* benchmarks showed that shifts are faster than xchgb on a 486dx2 */
	xorl %eax,%eax			/* %al, %cl = byte accumulators, kept */
	xorl %ecx,%ecx			/* zero-extended for _T indexing */
	movl $4,%ebp
L7:
	xorb %dl,%al
	movb _T(%eax),%al		/* al = T[al ^ lowbyte(%edx)] */
	rorl $8,%edx
	xorb %bl,%cl
	movb _T(%ecx),%cl		/* cl = T[cl ^ lowbyte(%ebx)] */
	rorl $8,%ebx
	/* BUGFIX: %esi/%edi have no 8-bit subregisters on i386, so the
	 * original "xorb %esi,%eax" / "xorb %edi,%eax" did not assemble.
	 * Borrow %edx (its byte for this round is already consumed and
	 * rotated away) to reach their low bytes, then restore it. */
	xchgl %esi,%edx
	xorb %dl,%al
	movb _T(%eax),%al		/* al = T[al ^ lowbyte(%esi)] */
	rorl $8,%edx
	xchgl %esi,%edx			/* %esi now rotated, %edx restored */
	xchgl %edi,%edx
	xorb %dl,%al
	movb _T(%eax),%al		/* al = T[al ^ lowbyte(%edi)] */
	rorl $8,%edx
	xchgl %edi,%edx			/* %edi now rotated, %edx restored */
	decl %ebp
	jne L7

	movb %cl,%ah			/* result in %ah:%al, as on the short path */

	addl $12,%esp			/* drop the three variable pushes */
	popl %ebx
	popl %esi
	popl %edi
	popl %ebp
	ret