/*
(c) Copyright Taiichi Yuasa and Masami Hagiya, 1984.  All rights reserved.
Copying of this file is authorized to users who have executed the true and
proper "License Agreement for Kyoto Common LISP" with SIGLISP.
*/

/*
	earith.c

	EXTENDED_MUL and EXTENDED_DIV perform 32 bit multiplication and
	division, respectively.

	EXTENDED_MUL(D,Q,R,HP,LP)
	calculates D*Q+R and stores the result at the locations HP and LP.
	D, Q, and R are 32 bit non-negative integers and HP and LP are
	word addresses.  The word at LP will contain the lower 31 (not 32)
	bits of the result, with its most significant bit set to 0.  The word
	at HP will contain the rest of the result, with its MSB also set to 0.

	EXTENDED_DIV(D,H,L,QP,RP)
	divides [H:L] by D and stores the quotient and the remainder at
	the locations QP and RP, respectively.  D, H, and L are 32 bit
	non-negative integers and QP and RP are word addresses.  Here, [H:L]
	denotes the (conceptual) 64 bit integer represented by H and L as
	follows.

	  63 62                  31 30                 0
	  |0|0|<lower 31 bits of H>|<lower 31 bits of L>|

	Although [H:L] is 64 bits wide, you can assume that the quotient is
	always representable as a 32 bit non-negative integer.
*/
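
/*
	For reference, the intended semantics written as a sketch in
	portable C (this assumes a 64 bit "long long int" type and is not
	compiled here; the S3000 and CONVEX versions below use essentially
	this approach):

	extended_mul(d, q, r, hp, lp)
	int d, q, r;
	int *hp, *lp;
	{
		long long int z;

		z = (long long int)d * (long long int)q + (long long int)r;
		*lp = (z & 0x7fffffffLL);
		*hp = (z >> 31);
	}

	extended_div(d, h, l, qp, rp)
	int d, h, l;
	int *qp, *rp;
	{
		long long int n;

		n = ((long long int)h << 31) | (long long int)l;
		*qp = (n / d);
		*rp = (n % d);
	}
*/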

#include "include.h"

#ifdef I386
/* by W. Schelter and M. Ballantyne */


/*  debug print: rename the real extended_mul below to extended_mul1 and enable this wrapper
#include <stdio.h>
extended_mul(a,b,r,c,d)
int a,b,r;
int *c;
int *d;
{extended_mul1(a,b,r,c,d);
 printf("Multiply (%d, %d, %d) --> (%d , %d) for %d \n",a,b,r,*c,*d,a*b+r);
 fflush(stdout);
}
*/
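
/* mull leaves the 64 bit product in edx:eax; after r is added in, the
   shll/btrl/adcl sequence moves bit 31 of eax up into edx and clears it,
   so that edx holds the upper part of the result and eax the lower 31
   bits, as described at the top of this file. */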

extended_mul(a,b,r,c,d)
int a,b,r;
int *c;
int *d;
{ asm("pushl    %ecx");
  asm("	movl	8(%ebp),%eax");
  asm("	mull	12(%ebp)");
  asm("	addl	16(%ebp),%eax");
  asm(" adcl	$0,%edx");
  asm("	shll	$1,%edx");
  asm(" btrl	$31,%eax");
  asm("	adcl	$0,%edx");
  asm(" movl	20(%ebp),%ecx");
  asm("	movl	%edx, (%ecx)");
  asm("	movl	24(%ebp), %ecx");
  asm("	movl	%eax, (%ecx)");
  asm("popl     %ecx");
}
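
/* The 62 bit dividend h*2^31 + l is first repacked into edx:eax (bit 0
   of h becomes bit 31 of eax, then h is shifted right by one) so that a
   single idivl yields the quotient in eax and the remainder in edx. */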

extended_div(d, h, l, qp, rp)
int d, h, l;
int *qp, *rp;
{ 
asm(  "pushl %ebx");
asm(  "movl 12(%ebp),%edx");
asm(  "movl 16(%ebp),%eax");
asm(  "btl $0,%edx");
asm(  "jae  .WFS");
asm(  "btsl $31,%eax");
asm(".WFS:");
asm(  "shrl $1,%edx");
asm(  "idivl 8(%ebp)");
asm(  "movl 20(%ebp),%ebx");
asm(  "movl %eax,(%ebx)");
asm(  "movl 24(%ebp),%ebx");
asm(  "movl %edx,(%ebx)");
asm(  "popl %ebx");
}
#endif

#ifdef VAX
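
/* emul forms d*q+r as the 64 bit quadword r1:r0 (r0 is the low half);
   the ashq/rotl pair then moves bit 31 of r0 up into r1 and clears it,
   giving the 33:31 split described above.  In extended_div, the
   ashq/addl2 pair reassembles [H:L] as a quadword before ediv. */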

extended_mul(d, q, r, hp, lp)
int d, q, r;
int *hp, *lp;
{
	asm("	emul	4(ap),8(ap),12(ap),r0");
	asm("	ashq	$1,r0,r0");
	asm("	rotl	$-1,r0,r0");
	asm("	movl	r0,*20(ap)");
	asm("	movl	r1,*16(ap)");
}

extended_div(d, h, l, qp, rp)
int d, h, l;
int *qp, *rp;
{
	asm("	clrl	r0");
	asm("	movl	8(ap),r1");
	asm("	ashq	$-1,r0,r0");
	asm("	addl2	12(ap),r0");
	asm("	ediv	4(ap),r0,*16(ap),*20(ap)");
}

#endif

#ifdef MC68K
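
/* The 68000 has no 32x32 multiply or 64/32 divide: extended_mul builds
   the 64 bit product from four 16x16 mulu partial products, and
   extended_div is a 32-step shift-and-subtract loop (the complemented
   quotient bits are collected in d2, hence the final notl). */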

extended_mul(d, q, r, hp, lp)
int d, q, r;
int *hp, *lp;
{
	asm("	movl	d2,a7@-");
	asm("	movw	a6@(8),d0");
	asm("	mulu	a6@(14),d0");
	asm("	movw	a6@(10),d1");
	asm("	mulu	a6@(12),d1");
	asm("	addl	d1,d0");
	asm("	movw	a6@(8),d1");
	asm("	mulu	a6@(12),d1");
	asm("	movw	a6@(10),d2");
	asm("	mulu	a6@(14),d2");
	asm("	swap	d2");
	asm("	addw	d0,d2");
	asm("	swap	d2");
	asm("	swap	d0");
	asm("	addxw	d0,d1");
	asm("	clrl	d0");
	asm("	swap	d1");
	asm("	addxw	d0,d1");
	asm("	swap	d1");
	asm("	addl	a6@(16),d2");
	asm("	addxl	d0,d1");
	asm("	lsll	#1,d2");
	asm("	roxll	#1,d1");
	asm("	lsrl	#1,d2");
	asm("	movl	a6@(20),a0");
	asm("	movl	d1,a0@");
	asm("	movl	a6@(24),a0");
	asm("	movl	d2,a0@");
	asm("	movl	a7@+,d2");
}

extended_div(d, h, l, qp, rp)
int d, h, l;
int *qp, *rp;
{
	asm("	moveml	#0x3000,a7@-");
	asm("	moveml	a6@(8),#0x307");
	asm("	lsll	#1,d2");
	asm("	addql	#1,d2");
	asm("	movw	#31,d3");
	asm("label2:	subl	d0,d1");
	asm("	bccs	label1");
	asm("	addl	d0,d1");
	asm("label1:	roxll	#1,d2");
	asm("	roxll	#1,d1");
	asm("	dbf	d3,label2");
	asm("	roxrl	#1,d1");
	asm("	notl	d2");
	asm("	movl	d2,a0@");
	asm("	movl	d1,a1@");
	asm("	moveml	a7@+,#0xc");
}

#endif

#ifndef NEWS
#ifdef MC68020
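
/* The 68020 mulul and divul give the full 64 bit product and the 64/32
   division in a register pair directly; the surrounding shift sequences
   convert between the machine's 32:32 register split and the 33:31
   split described at the top of this file. */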

extended_mul(d, q, r, hp, lp)
int d, q, r;
int *hp, *lp;
{
	asm("	movl	d2,a7@-");
	asm("	clrl	d2");
	asm("	movl	a6@(8),d0");
	asm("	mulul	a6@(12),d1:d0");
	asm("	addl	a6@(16),d0");
	asm("	addxl	d2,d1");
	asm("	lsll	#1,d0");
	asm("	roxll	#1,d1");
	asm("	lsrl	#1,d0");
	asm("	movl	a6@(20),a0");
	asm("	movl	d1,a0@");
	asm("	movl	a6@(24),a0");
	asm("	movl	d0,a0@");
	asm("	movl	a7@+,d2");
}

extended_div(d, h, l, qp, rp)
int d, h, l;
int *qp, *rp;
{
	asm("moveml	a6@(12),#0x303");
	asm("lsll	#1,d1");
	asm("lsrl	#1,d0");
	asm("roxrl	#1,d1");
	asm("divul	a6@(8),d0:d1");
	asm("movl	d0,a1@");
	asm("movl	d1,a0@");
}

#endif
#endif

#ifdef ATT3B2
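
/* The 3B2 mulw3 yields only a 32 bit product, so extended_mul builds
   the result from 16 bit halfword partial products; extended_div is a
   plain shift-and-subtract division loop. */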

extended_mul(d, q, r, hp, lp)
int d, q, r;
int *hp, *lp;
{
	register int *r8, *r7, *r6, *r5, *r4;

	asm("	movh	0(%ap),%r5");		/* R5 <= high(d) */
	asm("	andw3	&0xffff,0(%ap),%r6");	/* R6 <= low(d) */
	asm("	movh	4(%ap),%r7");		/* R7 <= high(q) */
	asm("	andw3	&0xffff,4(%ap),%r8");	/* R8 <= low(q) */

	asm("	mulw3	%r6,%r8,%r0");		/* R0 <= low(d)*low(q) */
	asm("	andw3	&0x80000000,%r0,%r1");	/* save MSB(R0) */
	asm("	lrsw3	&16,%r1,%r1");
	asm("	andw2	&0x7fffffff,%r0");	/* MSB(R0) <= 0 */
	asm("	addw2	8(%ap),%r0");		/* R0 <= R0 + r */
	asm("	andw3	&0xffff,%r0,%r4");	/* save low(R0) */

	asm("	lrsw3	&16,%r0,%r0");		/* R0 >> 16 */
	asm("	addw2	%r1,%r0");		/* resume MSB(R0) */

	asm("	mulw3	%r5,%r8,%r2");		/* R0 <= high(d)*low(q) */
	asm("	addw2	%r2,%r0");		/*       + R0           */
	asm("	andw3	&0x80000000,%r0,%r1");	/* save MSB(R0) */
	asm("	lrsw3	&15,%r1,%r1");
	asm("	andw2	&0x7fffffff,%r0");	/* MSB(R0) <= 0 */

	asm("	mulw3	%r7,%r6,%r2");		/* R0 <= low(d)*high(q) */
	asm("	addw2	%r2,%r0");		/*       + R0           */
	asm("	andw3	&0x7fff,%r0,%r2");	/* R2 <= low(R0) */
	asm("	llsw2	&16,%r2");		/* high <= low(R2)*2^16 */
	asm("	orw3	%r2,%r4,*16(%ap)");	/*         + low(R4)    */

	asm("	lrsw3	&15,%r0,%r0");		/* R0 >> 15, not 16 */
	asm("	addw2	%r1,%r0");		/* resume MSB(R0) */
	asm("	mulw3	%r5,%r7,%r1");		/* high <= high(d)*high(q) */ 
	asm("	llsw2	&1,%r1");		/*         + R0            */
	asm("	addw3	%r0,%r1,*12(%ap)");
}

extended_div(d, h, l, qp, rp)
int d, h, l;
int *qp, *rp;
{
	register int *r8, *r7, *r6;

	asm("	movw	0(%ap),%r2");
	asm("	movw	4(%ap),%r0");
	asm("	movw	8(%ap),%r1");
	asm("	movw	&0,%r8");	/* quotient to go */
	asm("	movw	&31,%r7");	/* loop counter */

	asm("a:	llsw2	&1,%r8");	/* R8 << 1, no need 1st time */
	asm("	llsw2	&1,%r0");	/* R0 << 1 */
	asm("	llsw2	&1,%r1");	/* R1 << 1 */
	asm("	jge	b");		/* skip if MSB(R1) = 0 */
	asm("	orw2	&1,%r0");	/* LSB(R0) <= 1 */
	asm("b:	subw3	%r2,%r0,%r6");	/* R6 <= R0 - R2 */
	asm("	jl	c");		/* skip if R6 < 0 */
	asm("	movw	%r6,%r0");	/* R0 <= R0 - R2 */
	asm("	orw2	&1,%r8");	/* LSB(R8) <= 1 */
	asm("c:	subw2	&1,%r7");	/* R7 <= R7 - 1 */
	asm("	jg	a");		/* repeat while R7 > 0 */

	asm("	movw	%r8,*12(%ap)");
	asm("	movw	%r0,*16(%ap)");
}

#endif

#ifdef NS32K

#endif

#ifdef S3000

extended_mul(d, q, r, hp, lp)
int d, q, r;
int *hp, *lp;
{
	long long int ld, lq, lr, z;
        int zh, zl;

        ld = d;
        lq = q;
        lr = r;
        z = ld*lq+lr;
        zl = (z & 0x000000007fffffffLL);
        zh = (z >> 31LL);
        *hp = zh;
        *lp = zl;
}

extended_div(d, h, l, qp, rp)
int d, h, l;
int *qp, *rp;
{
	long long int lh, ld, ll;

	ld = d;
	lh = h;
	ll = l;
	lh = (lh << 31LL);
	lh = (lh | ll);
	*qp = (lh/ld);
	*rp = (lh%ld);
}

#endif

#ifdef IBMRT

#endif

#ifdef NEWS
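
/* The same 68020 code as above, in the assembler syntax used on the
   Sony NEWS. */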

extended_mul(d, q, r, hp, lp)
int d, q, r;
int *hp, *lp;
{
	asm("	move.l	d2,-(sp)");
	asm("	clr.l	d2");
	asm("	move.l	(8,fp),d0");
	asm("	mulu.l	(12,fp),d1:d0");
	asm("	add.l	(16,fp),d0");
	asm("	addx.l	d2,d1");
	asm("	lsl.l	#1,d0");
	asm("	roxl.l	#1,d1");
	asm("	lsr.l	#1,d0");
	asm("	move.l	(20,fp),a0");
	asm("	move.l	d1,(a0)");
	asm("	move.l	(24,fp),a0");
	asm("	move.l	d0,(a0)");
	asm("	move.l	(sp)+,d2");
}

extended_div(d, h, l, qp, rp)
int d, h, l;
int *qp, *rp;
{
	asm("movem.l	(12,fp),#0x303");
	asm("lsl.l	#1,d1");
	asm("lsr.l	#1,d0");
	asm("roxr.l	#1,d1");
	asm("divu.l	(8,fp),d0:d1");
	asm("move.l	d0,(a1)");
	asm("move.l	d1,(a0)");
}

#endif

#ifdef CONVEX

extended_mul(d, q, r, hp, lp)
int d, q, r;
int *hp, *lp;
{
	long long int ld, lq, lr, z;
        int zh, zl;

        ld = d;
        lq = q;
        lr = r;
        z = ld*lq+lr;
        zl = (z & 0x000000007fffffffLL);
        zh = (z >> 31LL);
        *hp = zh;
        *lp = zl;
}

extended_div(d, h, l, qp, rp)
int d, h, l;
int *qp, *rp;
{
	long long int lh, ld, ll;

	ld = d;
	lh = h;
	ll = l;
	lh = (lh << 31LL);
	lh = (lh | ll);
	*qp = (lh/ld);
	*rp = (lh%ld);
}

#endif



#ifdef	hp9000s300
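
/* The 68020 code once more, in the HP-UX assembler syntax of the
   HP 9000 series 300. */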

extended_mul(d, q, r, hp, lp)
int d, q, r;
int *hp, *lp;
{
	asm("	mov.l	%d2,-(%a7)");
	asm("	clr.l	%d2");
	asm("	mov.l	(8,%fp),%d0");
	asm("	mulu.l	(12,%fp),%d1:%d0");
	asm("	add.l	(16,%fp),%d0");
	asm("	addx.l	%d2,%d1");
	asm("	lsl.l	&1,%d0");
	asm("	roxl.l	&1,%d1");
	asm("	lsr.l	&1,%d0");
	asm("	mov.l	(20,%fp),%a0");
	asm("	mov.l	%d1,(%a0)");
	asm("	mov.l	(24,%fp),%a0");
	asm("	mov.l	%d0,(%a0)");
	asm("	mov.l	(%a7)+,%d2");
}

extended_div(d, h, l, qp, rp)
int d, h, l;
int *qp, *rp;
{
	asm("	movm.l	(12,%fp),&0x303");
	asm("	lsl.l	&1,%d1");
	asm("	lsr.l	&1,%d0");
	asm("	roxr.l	&1,%d1");
	asm("	divu.l	(8,%fp),%d0:%d1");
	asm("	mov.l	%d0,(%a1)");
	asm("	mov.l	%d1,(%a0)");
}

#endif

#ifdef CMAC
#include "cmac.c"
#endif
