#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
# M68000 Hi-Performance Microprocessor Division
# M68060 Software Package
# Production Release P1.00 -- October 10, 1994
#
# M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
#
# THE SOFTWARE is provided on an "AS IS" basis and without warranty.
# To the maximum extent permitted by applicable law,
# MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
# INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
# and any warranty against infringement with regard to the SOFTWARE
# (INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
#
# To the maximum extent permitted by applicable law,
# IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
# (INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
# BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
# ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
#
# Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
#
# You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
# so long as this entire notice is retained without alteration in any modified and/or
# redistributed versions, and that such modified versions are clearly identified as such.
# No licenses are granted by implication, estoppel or otherwise under any patents
# or trademarks of Motorola, Inc.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# litop.s:
# This file is appended to the top of the 060ILSP package
# and contains the entry points into the package. The user, in
# effect, branches to one of the branch table entries located here.
#
bra.l _060LSP__idivs64_
short 0x0000
bra.l _060LSP__idivu64_
short 0x0000
bra.l _060LSP__imuls64_
short 0x0000
bra.l _060LSP__imulu64_
short 0x0000
bra.l _060LSP__cmp2_Ab_
short 0x0000
bra.l _060LSP__cmp2_Aw_
short 0x0000
bra.l _060LSP__cmp2_Al_
short 0x0000
bra.l _060LSP__cmp2_Db_
short 0x0000
bra.l _060LSP__cmp2_Dw_
short 0x0000
bra.l _060LSP__cmp2_Dl_
short 0x0000
# leave room for possible future additions.
align 0x200
#########################################################################
# XDEF **************************************************************** #
# _060LSP__idivu64_(): Emulate 64-bit unsigned div instruction. #
# _060LSP__idivs64_(): Emulate 64-bit signed div instruction. #
# #
# This is the library version which is accessed as a subroutine #
# and therefore does not work exactly like the 680X0 div{s,u}.l #
# 64-bit divide instruction. #
# #
# XREF **************************************************************** #
# None. #
# #
# INPUT *************************************************************** #
# 0x4(sp) = divisor #
# 0x8(sp) = hi(dividend) #
# 0xc(sp) = lo(dividend) #
# 0x10(sp) = pointer to location to place quotient/remainder #
# #
# OUTPUT ************************************************************** #
# 0x10(sp) = points to location of remainder/quotient. #
# remainder is in first longword, quotient is in 2nd. #
# #
# ALGORITHM *********************************************************** #
# If the operands are signed, make them unsigned and save the #
# sign info for later. Separate out special cases like divide-by-zero #
# or 32-bit divides if possible. Else, use a special math algorithm #
# to calculate the result. #
# Restore sign info if signed instruction. Set the condition #
# codes before performing the final "rts". If the divisor was equal to #
# zero, then perform a divide-by-zero using a 16-bit implemented #
# divide instruction. This way, the operating system can record that #
# the event occurred even though it may not point to the correct place. #
# #
#########################################################################
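#
# Illustrative C sketch (not part of the package): a model of the unsigned
# entry point's behavior as described above. The function name divu64_model
# and its return codes are hypothetical; they only mirror the divide-by-zero,
# overflow, and remainder/quotient ordering rules stated in this header.
#
#     #include <stdint.h>
#
#     /* out[0] = remainder, out[1] = quotient (see OUTPUT above) */
#     static int divu64_model(uint32_t divisor, uint32_t dd_hi,
#                             uint32_t dd_lo, uint32_t out[2])
#     {
#         if (divisor == 0)
#             return -1;                 /* emulation forces a real divide-by-zero */
#         if (dd_hi >= divisor)
#             return 1;                  /* quotient won't fit in 32 bits: set 'V',
#                                           leave the dividend unchanged */
#         uint64_t dividend = ((uint64_t)dd_hi << 32) | dd_lo;
#         out[0] = (uint32_t)(dividend % divisor);   /* remainder, first longword */
#         out[1] = (uint32_t)(dividend / divisor);   /* quotient, second longword */
#         return 0;
#     }
#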
set POSNEG, -1
set NDIVISOR, -2
set NDIVIDEND, -3
set DDSECOND, -4
set DDNORMAL, -8
set DDQUOTIENT, -12
set DIV64_CC, -16
##########
# divs.l #
##########
global _060LSP__idivs64_
_060LSP__idivs64_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-16
movm.l &0x3f00,-(%sp) # save d2-d7
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,DIV64_CC(%a6)
st POSNEG(%a6) # signed operation
bra.b ldiv64_cont
##########
# divu.l #
##########
global _060LSP__idivu64_
_060LSP__idivu64_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-16
movm.l &0x3f00,-(%sp) # save d2-d7
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,DIV64_CC(%a6)
sf POSNEG(%a6) # unsigned operation
ldiv64_cont:
mov.l 0x8(%a6),%d7 # fetch divisor
beq.w ldiv64eq0 # divisor is = 0!!!
mov.l 0xc(%a6), %d5 # get dividend hi
mov.l 0x10(%a6), %d6 # get dividend lo
# separate signed and unsigned divide
tst.b POSNEG(%a6) # signed or unsigned?
beq.b ldspecialcases # use positive divide
# save the sign of the divisor
# make divisor unsigned if it's negative
tst.l %d7 # chk sign of divisor
slt NDIVISOR(%a6) # save sign of divisor
bpl.b ldsgndividend
neg.l %d7 # complement negative divisor
# save the sign of the dividend
# make dividend unsigned if it's negative
ldsgndividend:
tst.l %d5 # chk sign of hi(dividend)
slt NDIVIDEND(%a6) # save sign of dividend
bpl.b ldspecialcases
mov.w &0x0, %cc # clear 'X' cc bit
negx.l %d6 # complement signed dividend
negx.l %d5
# extract some special cases:
# - is (dividend == 0) ?
# - is (hi(dividend) == 0 && (divisor <= lo(dividend))) ? (32-bit div)
ldspecialcases:
tst.l %d5 # is (hi(dividend) == 0)
bne.b ldnormaldivide # no, so try it the long way
tst.l %d6 # is (lo(dividend) == 0), too
beq.w lddone # yes, so (dividend == 0)
cmp.l %d7,%d6 # is (divisor <= lo(dividend))
bls.b ld32bitdivide # yes, so use 32 bit divide
exg %d5,%d6 # q = 0, r = dividend
bra.w ldivfinish # can't divide, we're done.
ld32bitdivide:
tdivu.l %d7, %d5:%d6 # it's only a 32/32 bit div!
bra.b ldivfinish
ldnormaldivide:
# last special case:
# - is hi(dividend) >= divisor ? if yes, then overflow
cmp.l %d7,%d5
bls.b lddovf # answer won't fit in 32 bits
# perform the divide algorithm:
bsr.l ldclassical # do int divide
# separate into signed and unsigned finishes.
ldivfinish:
tst.b POSNEG(%a6) # do divs, divu separately
beq.b lddone # divu has no processing!!!
# it was a divs.l, so ccode setting is a little more complicated...
tst.b NDIVIDEND(%a6) # remainder has same sign
beq.b ldcc # as dividend.
neg.l %d5 # sgn(rem) = sgn(dividend)
ldcc:
mov.b NDIVISOR(%a6), %d0
eor.b %d0, NDIVIDEND(%a6) # chk if quotient is negative
beq.b ldqpos # branch to quot positive
# 0x80000000 is the most negative number representable in 32-bit two's
# complement; negating 0x80000000 yields 0x80000000 again.
cmpi.l %d6, &0x80000000 # will (-quot) fit in 32 bits?
bhi.b lddovf
neg.l %d6 # make (-quot) 2's comp
bra.b lddone
ldqpos:
btst &0x1f, %d6 # will (+quot) fit in 32 bits?
bne.b lddovf
lddone:
# if the register numbers are the same, only the quotient gets saved.
# so, if we always save the quotient second, we save ourselves a cmp&beq
andi.w &0x10,DIV64_CC(%a6)
mov.w DIV64_CC(%a6),%cc
tst.l %d6 # may set 'N' ccode bit
# here, the remainder is in d5 and the quotient in d6. save both to the
# location pointed to by the last stack argument (0x14(%a6)): remainder
# in the first longword, quotient in the second.
# use movm here to not disturb the condition codes.
ldexit:
movm.l &0x0060,([0x14,%a6]) # save result
# EPILOGUE BEGIN ########################################################
# fmovm.l (%sp)+,&0x0 # restore no fpregs
movm.l (%sp)+,&0x00fc # restore d2-d7
unlk %a6
# EPILOGUE END ##########################################################
rts
# the result should be the unchanged dividend
lddovf:
mov.l 0xc(%a6), %d5 # get dividend hi
mov.l 0x10(%a6), %d6 # get dividend lo
andi.w &0x1c,DIV64_CC(%a6)
ori.w &0x02,DIV64_CC(%a6) # set 'V' ccode bit
mov.w DIV64_CC(%a6),%cc
bra.b ldexit
ldiv64eq0:
mov.l 0xc(%a6),([0x14,%a6])
mov.l 0x10(%a6),([0x14,%a6],0x4)
mov.w DIV64_CC(%a6),%cc
# EPILOGUE BEGIN ########################################################
# fmovm.l (%sp)+,&0x0 # restore no fpregs
movm.l (%sp)+,&0x00fc # restore d2-d7
unlk %a6
# EPILOGUE END ##########################################################
divu.w &0x0,%d0 # force a divbyzero exception
rts
###########################################################################
#########################################################################
# This routine uses the 'classical' Algorithm D from Donald Knuth's #
# Art of Computer Programming, vol II, Seminumerical Algorithms. #
# For this implementation b=2**16, and the target is U1U2U3U4/V1V2, #
# where U,V are words of the quadword dividend and longword divisor, #
# and U1, V1 are the most significant words. #
# #
# The most sig. longword of the 64 bit dividend must be in %d5, least   #
# in %d6, and the divisor in %d7. The caller has already ruled out      #
# divide-by-zero and quotient overflow (hi(dividend) < divisor), so     #
# this routine always succeeds.                                         #
# The quotient is returned in %d6 and the remainder in %d5.             #
#########################################################################
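#
# Illustrative C sketch (not part of the package) of the same base-2^16
# scheme: normalize the divisor, estimate each quotient digit from the
# leading words, correct the estimate, subtract, and repeat. It assumes,
# as the caller below guarantees, that the divisor is non-zero and that
# hi(dividend) < divisor (so the quotient fits in 32 bits). The digit
# correction here uses a full 64-bit compare instead of the V2-digit test
# used below. The name div64_knuth is hypothetical.
#
#     #include <stdint.h>
#
#     static uint32_t div64_knuth(uint64_t u, uint32_t v, uint32_t *rem)
#     {
#         int s = 0;
#
#         while (!(v & 0x80000000u)) {   /* normalize: msb of divisor set */
#             v <<= 1;
#             s++;
#         }
#         u <<= s;                       /* no bits lost: u < v_orig * 2^32 */
#
#         uint32_t vh = v >> 16;         /* V1 */
#         uint32_t q  = 0;
#
#         for (int i = 1; i >= 0; i--) {            /* two quotient digits, msw first */
#             uint64_t chunk = u >> (16 * i);       /* current 3-word dividend window */
#             uint32_t qhat  = (uint32_t)((chunk >> 16) / vh);
#             if (qhat > 0xffffu)
#                 qhat = 0xffffu;                   /* a digit is at most b-1 */
#             while ((uint64_t)qhat * v > chunk)    /* trial digit never underestimates, */
#                 qhat--;                           /* so a few decrements correct it */
#             u -= ((uint64_t)qhat * v) << (16 * i);
#             q |= qhat << (16 * i);
#         }
#
#         *rem = (uint32_t)(u >> s);     /* undo the normalization shift */
#         return q;
#     }
#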
ldclassical:
# if the divisor msw is 0, use a simpler algorithm than the full-blown
# one at lddknuth:
cmpi.l %d7, &0xffff
bhi.b lddknuth # go use D. Knuth algorithm
# Since the divisor is only a word (and larger than the mslw of the dividend),
# a simpler algorithm may be used:
# In the general case, four quotient words would be created by
# dividing the divisor word into each dividend word. In this case,
# the first two quotient words must be zero, or overflow would occur.
# Since we already checked this case above, we can treat the most significant
# longword of the dividend as (0) remainder (see Knuth) and merely complete
# the last two divisions to get a quotient longword and word remainder:
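#
# Illustrative C sketch (not part of the package) of this short cut,
# assuming, as guaranteed above, that the divisor fits in a word and is
# larger than hi(dividend). The name div_by_word is hypothetical.
#
#     #include <stdint.h>
#
#     /* v <= 0xffff and dd_hi < v; b = 2^16 */
#     static uint32_t div_by_word(uint32_t dd_hi, uint32_t dd_lo,
#                                 uint32_t v, uint32_t *rem)
#     {
#         uint32_t r = dd_hi;                    /* running remainder, < v */
#         uint32_t t, q;
#
#         t = (r << 16) | (dd_lo >> 16);         /* r*b + u3 */
#         q = (t / v) << 16;                     /* first quotient word */
#         r = t % v;
#
#         t = (r << 16) | (dd_lo & 0xffffu);     /* r*b + u4 */
#         q |= t / v;                            /* second quotient word */
#         *rem = t % v;
#         return q;
#     }
#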
clr.l %d1
swap %d5 # same as r*b if previous step rqd
swap %d6 # get u3 to lsw position
mov.w %d6, %d5 # rb + u3
divu.w %d7, %d5
mov.w %d5, %d1 # first quotient word
swap %d6 # get u4
mov.w %d6, %d5 # rb + u4
divu.w %d7, %d5
swap %d1
mov.w %d5, %d1 # 2nd quotient 'digit'
clr.w %d5
swap %d5 # now remainder
mov.l %d1, %d6 # and quotient
rts
lddknuth:
# In this algorithm, the divisor is treated as a 2 digit (word) number
# which is divided into a 3 digit (word) dividend to get one quotient
# digit (word). After subtraction, the dividend is shifted and the
# process repeated. Before beginning, the divisor and dividend are
# 'normalized' so that the process of estimating the quotient digit
# will yield verifiably correct results.
clr.l DDNORMAL(%a6) # count of shifts for normalization
clr.b DDSECOND(%a6) # clear flag for quotient digits
clr.l %d1 # %d1 will hold trial quotient
lddnchk:
btst &31, %d7 # must we normalize? first word of
bne.b lddnormalized # divisor (V1) must be >= 65536/2
addq.l &0x1, DDNORMAL(%a6) # count normalization shifts
lsl.l &0x1, %d7 # shift the divisor
lsl.l &0x1, %d6 # shift u4,u3 with overflow to u2
roxl.l &0x1, %d5 # shift u1,u2
bra.w lddnchk
lddnormalized:
# Now calculate an estimate of the quotient words (msw first, then lsw).
# The comments use subscripts for the first quotient digit determination.
mov.l %d7, %d3 # divisor
mov.l %d5, %d2 # dividend mslw
swap %d2
swap %d3
cmp.w %d2, %d3 # V1 = U1 ?
bne.b lddqcalc1
mov.w &0xffff, %d1 # use max trial quotient word
bra.b lddadj0
lddqcalc1:
mov.l %d5, %d1
divu.w %d3, %d1 # use quotient of mslw/msw
andi.l &0x0000ffff, %d1 # zero any remainder
lddadj0:
# now test the trial quotient and adjust. This step plus the
# normalization assures (according to Knuth) that the trial
# quotient will be at worst 1 too large.
mov.l %d6, -(%sp)
clr.w %d6 # word u3 left
swap %d6 # in lsw position
lddadj1: mov.l %d7, %d3
mov.l %d1, %d2
mulu.w %d7, %d2 # V2q
swap %d3
mulu.w %d1, %d3 # V1q
mov.l %d5, %d4 # U1U2
sub.l %d3, %d4 # U1U2 - V1q
swap %d4
mov.w %d4,%d0
mov.w %d6,%d4 # insert lower word (U3)
tst.w %d0 # is upper word set?
bne.w lddadjd1
# add.l %d6, %d4 # (U1U2 - V1q) + U3
cmp.l %d2, %d4
bls.b lddadjd1 # is V2q > (U1U2-V1q) + U3 ?
subq.l &0x1, %d1 # yes, decrement and recheck
bra.b lddadj1
lddadjd1:
# now test the word by multiplying it by the divisor (V1V2) and comparing
# the 3 digit (word) result with the current dividend words
mov.l %d5, -(%sp) # save %d5 (%d6 already saved)
mov.l %d1, %d6
swap %d6 # shift answer to ms 3 words
mov.l %d7, %d5
bsr.l ldmm2
mov.l %d5, %d2 # now %d2,%d3 are trial*divisor
mov.l %d6, %d3
mov.l (%sp)+, %d5 # restore dividend
mov.l (%sp)+, %d6
sub.l %d3, %d6
subx.l %d2, %d5 # subtract double precision
bcc ldd2nd # no carry, do next quotient digit
subq.l &0x1, %d1 # q is one too large
# need to add back divisor longword to current ms 3 digits of dividend
# - according to Knuth, this is done only 2 out of 65536 times for random
# divisor, dividend selection.
clr.l %d2
mov.l %d7, %d3
swap %d3
clr.w %d3 # %d3 now ls word of divisor
add.l %d3, %d6 # aligned with 3rd word of dividend
addx.l %d2, %d5
mov.l %d7, %d3
clr.w %d3 # %d3 now ms word of divisor
swap %d3 # aligned with 2nd word of dividend
add.l %d3, %d5
ldd2nd:
tst.b DDSECOND(%a6) # both q words done?
bne.b lddremain
# first quotient digit now correct. store digit and shift the
# (subtracted) dividend
mov.w %d1, DDQUOTIENT(%a6)
clr.l %d1
swap %d5
swap %d6
mov.w %d6, %d5
clr.w %d6
st DDSECOND(%a6) # second digit
bra.w lddnormalized
lddremain:
# add 2nd word to quotient, get the remainder.
mov.w %d1, DDQUOTIENT+2(%a6)
# shift down one word/digit to renormalize remainder.
mov.w %d5, %d6
swap %d6
swap %d5
mov.l DDNORMAL(%a6), %d7 # get norm shift count
beq.b lddrn
subq.l &0x1, %d7 # set for loop count
lddnlp:
lsr.l &0x1, %d5 # shift into %d6
roxr.l &0x1, %d6
dbf %d7, lddnlp
lddrn:
mov.l %d6, %d5 # remainder
mov.l DDQUOTIENT(%a6), %d6 # quotient
rts
ldmm2:
# factors for the 32X32->64 multiplication are in %d5 and %d6.
# returns 64 bit result in %d5 (hi) %d6(lo).
# destroys %d2,%d3,%d4.
# multiply hi,lo words of each factor to get 4 intermediate products
mov.l %d6, %d2
mov.l %d6, %d3
mov.l %d5, %d4
swap %d3
swap %d4
mulu.w %d5, %d6 # %d6 <- lsw*lsw
mulu.w %d3, %d5 # %d5 <- msw-dest*lsw-source
mulu.w %d4, %d2 # %d2 <- msw-source*lsw-dest
mulu.w %d4, %d3 # %d3 <- msw*msw
# now use swap and addx to consolidate to two longwords
clr.l %d4
swap %d6
add.w %d5, %d6 # add msw of l*l to lsw of m*l product
addx.w %d4, %d3 # add any carry to m*m product
add.w %d2, %d6 # add in lsw of other m*l product
addx.w %d4, %d3 # add any carry to m*m product
swap %d6 # %d6 is low 32 bits of final product
clr.w %d5
clr.w %d2 # lsw of two mixed products used,
swap %d5 # now use msws of longwords
swap %d2
add.l %d2, %d5
add.l %d3, %d5 # %d5 now ms 32 bits of final product
rts
#########################################################################
# XDEF **************************************************************** #
# _060LSP__imulu64_(): Emulate 64-bit unsigned mul instruction #
# _060LSP__imuls64_(): Emulate 64-bit signed mul instruction. #
# #
# This is the library version which is accessed as a subroutine #
# and therefore does not work exactly like the 680X0 mul{s,u}.l #
# 64-bit multiply instruction. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# 0x4(sp) = multiplier #
# 0x8(sp) = multiplicand #
# 0xc(sp) = pointer to location to place 64-bit result #
# #
# OUTPUT ************************************************************** #
# 0xc(sp) = points to location of 64-bit result #
# #
# ALGORITHM *********************************************************** #
# Perform the multiply in pieces using 16x16->32 unsigned #
# multiplies and "add" instructions. #
# Set the condition codes as appropriate before performing an #
# "rts". #
# #
#########################################################################
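#
# Illustrative C sketch (not part of the package): forming the 64-bit
# product from four 16x16->32 partial products, mirroring the diagram and
# the add/addx sequence used below. The name mulu32x32 is hypothetical.
#
#     #include <stdint.h>
#
#     static void mulu32x32(uint32_t mr, uint32_t md, uint32_t *hi, uint32_t *lo)
#     {
#         uint32_t ll = (mr & 0xffffu) * (md & 0xffffu);   /* [1] lo(mr)*lo(md) */
#         uint32_t hl = (mr >> 16)     * (md & 0xffffu);   /* [2] hi(mr)*lo(md) */
#         uint32_t lh = (mr & 0xffffu) * (md >> 16);       /* [3] lo(mr)*hi(md) */
#         uint32_t hh = (mr >> 16)     * (md >> 16);       /* [4] hi(mr)*hi(md) */
#
#         /* fold the low halves of [2] and [3] into bits 16-47 of the result */
#         uint32_t mid = (ll >> 16) + (hl & 0xffffu) + (lh & 0xffffu);
#
#         *lo = (mid << 16) | (ll & 0xffffu);
#         *hi = hh + (hl >> 16) + (lh >> 16) + (mid >> 16);
#     }
#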
set MUL64_CC, -4
global _060LSP__imulu64_
_060LSP__imulu64_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,MUL64_CC(%a6) # save incoming ccodes
mov.l 0x8(%a6),%d0 # store multiplier in d0
beq.w mulu64_zero # handle zero separately
mov.l 0xc(%a6),%d1 # get multiplicand in d1
beq.w mulu64_zero # handle zero separately
#########################################################################
# 63 32 0 #
# ---------------------------- #
# | hi(mplier) * hi(mplicand)| #
# ---------------------------- #
# ----------------------------- #
# | hi(mplier) * lo(mplicand) | #
# ----------------------------- #
# ----------------------------- #
# | lo(mplier) * hi(mplicand) | #
# ----------------------------- #
# | ----------------------------- #
# --|-- | lo(mplier) * lo(mplicand) | #
# | ----------------------------- #
# ======================================================== #
# -------------------------------------------------------- #
# | hi(result) | lo(result) | #
# -------------------------------------------------------- #
#########################################################################
mulu64_alg:
# load temp registers with operands
mov.l %d0,%d2 # mr in d2
mov.l %d0,%d3 # mr in d3
mov.l %d1,%d4 # md in d4
swap %d3 # hi(mr) in lo d3
swap %d4 # hi(md) in lo d4
# complete necessary multiplies:
mulu.w %d1,%d0 # [1] lo(mr) * lo(md)
mulu.w %d3,%d1 # [2] hi(mr) * lo(md)
mulu.w %d4,%d2 # [3] lo(mr) * hi(md)
mulu.w %d4,%d3 # [4] hi(mr) * hi(md)
# add lo portions of [2],[3] to hi portion of [1].
# add carries produced from these adds to [4].
# lo([1]) is the final lo 16 bits of the result.
clr.l %d4 # load d4 w/ zero value
swap %d0 # hi([1]) <==> lo([1])
add.w %d1,%d0 # hi([1]) + lo([2])
addx.l %d4,%d3 # [4] + carry
add.w %d2,%d0 # hi([1]) + lo([3])
addx.l %d4,%d3 # [4] + carry
swap %d0 # lo([1]) <==> hi([1])
# lo portions of [2],[3] have been added in to final result.
# now, clear lo, put hi in lo reg, and add to [4]
clr.w %d1 # clear lo([2])
clr.w %d2 # clear hi([3])
swap %d1 # hi([2]) in lo d1
swap %d2 # hi([3]) in lo d2
add.l %d2,%d1 # [4] + hi([2])
add.l %d3,%d1 # [4] + hi([3])
# now, grab the condition codes. the only one that can be set is 'N'.
# 'N' CAN be set even though the operation is unsigned, if bit 63 of the
# result is set.
mov.w MUL64_CC(%a6),%d4
andi.b &0x10,%d4 # keep old 'X' bit
tst.l %d1 # may set 'N' bit
bpl.b mulu64_ddone
ori.b &0x8,%d4 # set 'N' bit
mulu64_ddone:
mov.w %d4,%cc
# here, the result is in d1 (hi 32 bits) and d0 (lo 32 bits). save it to
# the location pointed to by the last stack argument (0x10(%a6)), hi
# longword first.
# use movm here to not disturb the condition codes.
mulu64_end:
exg %d1,%d0
movm.l &0x0003,([0x10,%a6]) # save result
# EPILOGUE BEGIN ########################################################
# fmovm.l (%sp)+,&0x0 # restore no fpregs
movm.l (%sp)+,&0x001c # restore d2-d4
unlk %a6
# EPILOGUE END ##########################################################
rts
# one or both of the operands is zero so the result is also zero.
# save the zero result through the result pointer and set the 'Z' ccode bit.
mulu64_zero:
clr.l %d0
clr.l %d1
mov.w MUL64_CC(%a6),%d4
andi.b &0x10,%d4
ori.b &0x4,%d4
mov.w %d4,%cc # set 'Z' ccode bit
bra.b mulu64_end
##########
# muls.l #
##########
global _060LSP__imuls64_
_060LSP__imuls64_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3c00,-(%sp) # save d2-d5
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,MUL64_CC(%a6) # save incoming ccodes
mov.l 0x8(%a6),%d0 # store multiplier in d0
beq.b mulu64_zero # handle zero separately
mov.l 0xc(%a6),%d1 # get multiplicand in d1
beq.b mulu64_zero # handle zero separately
clr.b %d5 # clear sign tag
tst.l %d0 # is multiplier negative?
bge.b muls64_chk_md_sgn # no
neg.l %d0 # make multiplier positive
ori.b &0x1,%d5 # save multiplier sgn
# the result sign is the exclusive or of the operand sign bits.
muls64_chk_md_sgn:
tst.l %d1 # is multiplicand negative?
bge.b muls64_alg # no
neg.l %d1 # make multiplicand positive
eori.b &0x1,%d5 # calculate correct sign
#########################################################################
# 63 32 0 #
# ---------------------------- #
# | hi(mplier) * hi(mplicand)| #
# ---------------------------- #
# ----------------------------- #
# | hi(mplier) * lo(mplicand) | #
# ----------------------------- #
# ----------------------------- #
# | lo(mplier) * hi(mplicand) | #
# ----------------------------- #
# | ----------------------------- #
# --|-- | lo(mplier) * lo(mplicand) | #
# | ----------------------------- #
# ======================================================== #
# -------------------------------------------------------- #
# | hi(result) | lo(result) | #
# -------------------------------------------------------- #
#########################################################################
muls64_alg:
# load temp registers with operands
mov.l %d0,%d2 # mr in d2
mov.l %d0,%d3 # mr in d3
mov.l %d1,%d4 # md in d4
swap %d3 # hi(mr) in lo d3
swap %d4 # hi(md) in lo d4
# complete necessary multiplies:
mulu.w %d1,%d0 # [1] lo(mr) * lo(md)
mulu.w %d3,%d1 # [2] hi(mr) * lo(md)
mulu.w %d4,%d2 # [3] lo(mr) * hi(md)
mulu.w %d4,%d3 # [4] hi(mr) * hi(md)
# add lo portions of [2],[3] to hi portion of [1].
# add carries produced from these adds to [4].
# lo([1]) is the final lo 16 bits of the result.
clr.l %d4 # load d4 w/ zero value
swap %d0 # hi([1]) <==> lo([1])
add.w %d1,%d0 # hi([1]) + lo([2])
addx.l %d4,%d3 # [4] + carry
add.w %d2,%d0 # hi([1]) + lo([3])
addx.l %d4,%d3 # [4] + carry
swap %d0 # lo([1]) <==> hi([1])
# lo portions of [2],[3] have been added in to final result.
# now, clear lo, put hi in lo reg, and add to [4]
clr.w %d1 # clear lo([2])
clr.w %d2 # clear hi([3])
swap %d1 # hi([2]) in lo d1
swap %d2 # hi([3]) in lo d2
add.l %d2,%d1 # [4] + hi([2])
add.l %d3,%d1 # [4] + hi([3])
tst.b %d5 # should result be signed?
beq.b muls64_done # no
# result should be a signed negative number.
# compute 2's complement of the unsigned number:
# -negate all bits and add 1
muls64_neg:
not.l %d0 # negate lo(result) bits
not.l %d1 # negate hi(result) bits
addq.l &1,%d0 # add 1 to lo(result)
addx.l %d4,%d1 # add carry to hi(result)
muls64_done:
mov.w MUL64_CC(%a6),%d4
andi.b &0x10,%d4 # keep old 'X' bit
tst.l %d1 # may set 'N' bit
bpl.b muls64_ddone
ori.b &0x8,%d4 # set 'N' bit
muls64_ddone:
mov.w %d4,%cc
# here, the result is in d1 (hi 32 bits) and d0 (lo 32 bits). save it to
# the location pointed to by the last stack argument (0x10(%a6)), hi
# longword first.
# use movm here to not disturb the condition codes.
muls64_end:
exg %d1,%d0
movm.l &0x0003,([0x10,%a6]) # save result
# EPILOGUE BEGIN ########################################################
# fmovm.l (%sp)+,&0x0 # restore no fpregs
movm.l (%sp)+,&0x003c # restore d2-d5
unlk %a6
# EPILOGUE END ##########################################################
rts
# one or both of the operands is zero so the result is also zero.
# save the zero result through the result pointer and set the 'Z' ccode bit.
muls64_zero:
clr.l %d0
clr.l %d1
mov.w MUL64_CC(%a6),%d4
andi.b &0x10,%d4
ori.b &0x4,%d4
mov.w %d4,%cc # set 'Z' ccode bit
bra.b muls64_end
#########################################################################
# XDEF **************************************************************** #
# _060LSP__cmp2_Ab_(): Emulate "cmp2.b An,<ea>". #
# _060LSP__cmp2_Aw_(): Emulate "cmp2.w An,<ea>". #
# _060LSP__cmp2_Al_(): Emulate "cmp2.l An,<ea>". #
# _060LSP__cmp2_Db_(): Emulate "cmp2.b Dn,<ea>". #
# _060LSP__cmp2_Dw_(): Emulate "cmp2.w Dn,<ea>". #
# _060LSP__cmp2_Dl_(): Emulate "cmp2.l Dn,<ea>". #
# #
# This is the library version which is accessed as a subroutine #
# and therefore does not work exactly like the 680X0 "cmp2" #
# instruction. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# 0x4(sp) = Rn #
# 0x8(sp) = pointer to boundary pair #
# #
# OUTPUT ************************************************************** #
# cc = condition codes are set correctly #
# #
# ALGORITHM *********************************************************** #
# In the interest of simplicity, all operands are converted to #
# longword size whether the operation is byte, word, or long. The #
# bounds are sign extended accordingly. If Rn is a data register, Rn is #
# also sign extended. If Rn is an address register, it need not be sign #
# extended since the full register is always used. #
# The condition codes are set correctly before the final "rts". #
# #
#########################################################################
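#
# Illustrative C sketch (not part of the package): the 'Z' and 'C' results
# the emulation produces once the bounds and Rn have been widened to
# longwords. A single unsigned comparison of (Rn - lo) against (hi - lo)
# covers both signed and unsigned bound pairs (see l_cmp2_cmp below).
# The name cmp2_cc_model is hypothetical.
#
#     #include <stdint.h>
#
#     static void cmp2_cc_model(uint32_t rn, uint32_t lo, uint32_t hi,
#                               int *z, int *c)
#     {
#         uint32_t d = rn - lo;         /* (Rn - lo), mod 2^32 */
#         uint32_t r = hi - lo;         /* (hi - lo), mod 2^32 */
#
#         *z = (d == 0) || (d == r);    /* Rn equals the lower or upper bound */
#         *c = (d > r);                 /* Rn is out of bounds */
#     }
#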
set CMP2_CC, -4
global _060LSP__cmp2_Ab_
_060LSP__cmp2_Ab_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.b ([0xc,%a6],0x0),%d0
mov.b ([0xc,%a6],0x1),%d1
extb.l %d0 # sign extend lo bnd
extb.l %d1 # sign extend hi bnd
bra.w l_cmp2_cmp # go do the compare emulation
global _060LSP__cmp2_Aw_
_060LSP__cmp2_Aw_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.w ([0xc,%a6],0x0),%d0
mov.w ([0xc,%a6],0x2),%d1
ext.l %d0 # sign extend lo bnd
ext.l %d1 # sign extend hi bnd
bra.w l_cmp2_cmp # go do the compare emulation
global _060LSP__cmp2_Al_
_060LSP__cmp2_Al_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.l ([0xc,%a6],0x0),%d0
mov.l ([0xc,%a6],0x4),%d1
bra.w l_cmp2_cmp # go do the compare emulation
global _060LSP__cmp2_Db_
_060LSP__cmp2_Db_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.b ([0xc,%a6],0x0),%d0
mov.b ([0xc,%a6],0x1),%d1
extb.l %d0 # sign extend lo bnd
extb.l %d1 # sign extend hi bnd
# operation is a data register compare.
# sign extend byte to long so we can do simple longword compares.
extb.l %d2 # sign extend data byte
bra.w l_cmp2_cmp # go do the compare emulation
global _060LSP__cmp2_Dw_
_060LSP__cmp2_Dw_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.w ([0xc,%a6],0x0),%d0
mov.w ([0xc,%a6],0x2),%d1
ext.l %d0 # sign extend lo bnd
ext.l %d1 # sign extend hi bnd
# operation is a data register compare.
# sign extend word to long so we can do simple longword compares.
ext.l %d2 # sign extend data word
bra.w l_cmp2_cmp # go emulate compare
global _060LSP__cmp2_Dl_
_060LSP__cmp2_Dl_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.l ([0xc,%a6],0x0),%d0
mov.l ([0xc,%a6],0x4),%d1
#
# To set the ccodes correctly:
# (1) save 'Z' bit from (Rn - lo)
# (2) save 'Z' and 'C' bits from ((hi - lo) - (Rn - lo))
# (3) keep 'X', 'N', and 'V' from before instruction
# (4) combine ccodes
#
l_cmp2_cmp:
sub.l %d0, %d2 # (Rn - lo)
mov.w %cc, %d3 # fetch resulting ccodes
andi.b &0x4, %d3 # keep 'Z' bit
sub.l %d0, %d1 # (hi - lo)
cmp.l %d1,%d2 # ((hi - lo) - (Rn - lo))
mov.w %cc, %d4 # fetch resulting ccodes
or.b %d4, %d3 # combine w/ earlier ccodes
andi.b &0x5, %d3 # keep 'Z' and 'C'
mov.w CMP2_CC(%a6), %d4 # fetch old ccodes
andi.b &0x1a, %d4 # keep 'X','N','V' bits
or.b %d3, %d4 # insert new ccodes
mov.w %d4,%cc # save new ccodes
# EPILOGUE BEGIN ########################################################
# fmovm.l (%sp)+,&0x0 # restore no fpregs
movm.l (%sp)+,&0x001c # restore d2-d4
unlk %a6
# EPILOGUE END ##########################################################
rts